Dataset columns:
  query            string, length 9 to 3.4k
  document         string, length 9 to 87.4k
  metadata         dict
  negatives        sequence, length 4 to 101
  negative_scores  sequence, length 4 to 101
  document_score   string, length 3 to 10
  document_rank    string, 102 distinct values
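For orientation, here is a minimal sketch of loading a dataset with this schema through the `datasets` library and inspecting one row. The repository path is a placeholder, not the actual dataset name, and the printed slices are only for readable output.

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the real dataset path.
ds = load_dataset("org/dataset-name", split="train")
row = ds[0]

print(row["query"])                # natural-language query
print(row["document"][:200])       # positive code document, truncated for display
print(len(row["negatives"]))       # between 4 and 101 negative code snippets
print(row["negative_scores"][:5])  # scores aligned index-by-index with `negatives`
print(row["document_score"], row["document_rank"])
```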
Example row:

query:
  run function my_squares and my_join

document:
  def run_my_funcs(x, y):
      print(x, y)
      my_squares(x)
      my_join(x, y)
      return 0

metadata:
  { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_my_funcs(x, y):\n print\n my_squares(x)\n my_join(x, y)\n return 0", "def double_chop_pairs(\n x1, y1, z1, w1, cell1, x2, y2, z2, w2, indx2, rbins_squared, result):\n start = cuda.grid(1)\n stride = cuda.gridsize(1)\n\n n1 = x1.shape[0]\n nbins = rbins_squared.shape[0]\n\n for i in range(start, n1, stride):\n px = x1[i]\n py = y1[i]\n pz = z1[i]\n pw = w1[i]\n\n cell1_i = cell1[i]\n first = indx2[cell1_i]\n last = indx2[cell1_i+1]\n\n for j in range(first, last):\n qx = x2[j]\n qy = y2[j]\n qz = z2[j]\n qw = w2[j]\n\n dx = px-qx\n dy = py-qy\n dz = pz-qz\n wprod = pw*qw\n dsq = dx*dx + dy*dy + dz*dz\n\n k = nbins-1\n while dsq <= rbins_squared[k]:\n cuda.atomic.add(result, k-1, wprod)\n k = k-1\n if k <= 0:\n break", "def _run():\n matching_terms = {'a', 'b'}\n source_counts = {'a': 10, 'b': 50, 'c': 25}\n target_counts = {'a': 4, 'b': 73, 'c': 15}\n source_chunk = ['a', 'b']\n target_chunk = ['a', 'c', 'b']\n source_distance = score.find_distance(\n matching_terms, source_chunk, source_counts)\n target_distance = score.find_distance(\n matching_terms, target_chunk, target_counts)\n match_score = score.vanilla(\n matching_terms, source_distance, target_distance, source_counts,\n target_counts)\n print('Calculated score:', match_score)", "def multiple_eval_for_loops_v2():", "def operations(h, w):\r\n A=np.random.random([h,w])\r\n B=np.random.random([h,w])\r\n s=A+B\r\n return A,B,s\r\n raise NotImplementedError", "def run(brickheight,bricklength,walllength,wallheight,occupied=[],answer=[],globall=[]):\n if bricklength == brickheight:\n for t in range(walllength-bricklength+1):\n for s in range(wallheight-brickheight +1):\n column = t\n row = s\n if test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n put(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer)\n if end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n return answer\n else:\n return False\n if bricklength != brickheight:\n for t in range(walllength):\n for s in range(wallheight):\n column = t\n row = s\n\n if test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer) and \\\n test(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer):\n occupied2 = occupied[:]\n answer2 = answer[:]\n\n put(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer)\n if not end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n run(brickheight,bricklength,walllength,wallheight,occupied,answer,globall)\n else:\n globall.append(answer)\n\n put(brickheight,bricklength,row,column,walllength,wallheight,occupied2,answer2)\n if not end(brickheight,bricklength,walllength,wallheight,occupied2,answer2):\n run(brickheight,bricklength,walllength,wallheight,occupied2,answer2,globall)\n else:\n globall.append(answer)\n \n elif test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n put(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer)\n if not end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n run(brickheight,bricklength,walllength,wallheight,occupied,answer,globall)\n else:\n globall.append(answer)\n \n elif test(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer):\n put(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer)\n if not end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n run(brickheight,bricklength,walllength,wallheight,occupied,answer,globall)\n else:\n 
globall.append(answer)\n return globall", "def main():\n# example_from_m3()\n# draw_you_guess_it()\n# draw_pink_square()\n draw_squares_in_squares()", "def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment", "def multiple_eval_for_loops_v1():", "def run(self):\n\t\t#some additional initialization\n\t\tself.init()\n\t\t\n\t\t(conn, curs) = self.db_connect(self.hostname, self.dbname, self.schema)\n\t\tcurs.execute(\"begin\")\n\t\tif self.new_table:\n\t\t\tself.create_tables(curs, self.splat_table, self.mcl_table)\n\t\tcurs.execute(\"DECLARE crs CURSOR FOR select splat_id,edge_set from %s\"%self.table)\n\t\tcurs.execute(\"fetch 2000 from crs\")\n\t\trows = curs.fetchall()\n\t\tno_of_splats = 0\n\t\tno_of_mcl_results = 0\n\t\twhile rows:\n\t\t\tfor row in rows:\n\t\t\t\tsplat_id = row[0]\n\t\t\t\tgraph_dict = self.splat2graph_dict(row[1])\n\t\t\t\t#cracking here\n\t\t\t\tlistOfMclResult = self.crack_dict[self.type].run(splat_id, graph_dict, self.tmpinfname, \\\n\t\t\t\t\tself.tmpoutfname, self.parameter_a, self.parameter_b, \\\n\t\t\t\t\tself.argument1_dict[self.type], argument2=self.codense2db_instance, argument3=curs)\n\t\t\t\tif self.needcommit:\n\t\t\t\t\tself.submit(curs, self.splat_table, self.mcl_table, listOfMclResult)\n\t\t\t\tno_of_splats += 1\n\t\t\t\tno_of_mcl_results += len(listOfMclResult)\n\t\t\tif self.report:\n\t\t\t\tstring = 'Splat_patterns: %d, Mcl_results: %d'%(no_of_splats, no_of_mcl_results)\n\t\t\t\tsys.stderr.write('%s%s'%('\\x08'*80,string))\n\t\t\tcurs.execute(\"fetch 2000 from crs\")\n\t\t\trows = curs.fetchall()\n\t\tif self.needcommit:\n\t\t\tcurs.execute(\"end\")\n\t\t#02-25-05 cleanup the temp stuff\n\t\tif 
self.type==1:\n\t\t\tos.remove(self.tmpinfname)\n\t\t\tos.remove(self.tmpoutfname)\n\t\t\tos.rmdir(self.dir_files)", "def compute():\n dataset1 = 'project/data/dataset1.csv'\n dataset2 = \"project/data/dataset2.csv\"\n\n reader = CsvReader()\n\n data1 = reader.readCsv(dataset1)\n data2 = reader.readCsv(dataset2)\n\n database1 = DataBase(data1)\n database2 = DataBase(data2)\n\n Thread1 = threading.Thread(target=database1.fill, args= (1, ))\n Thread2 = threading.Thread(target=database2.fill, args= (2, ))\n\n\n Thread1.start()\n Thread2.start()", "def reduce_run():", "def mul_fns(f_and_df, g_and_dg):\n \"*** YOUR CODE HERE ***\"", "def _run(evaluation_dir_name, smoothing_radius_grid_cells,\n score_colour_map_name, num_ex_colour_map_name, max_colour_percentile,\n output_dir_name):\n\n if smoothing_radius_grid_cells <= 0:\n smoothing_radius_grid_cells = None\n\n score_colour_map_object = pyplot.get_cmap(score_colour_map_name)\n num_ex_colour_map_object = pyplot.get_cmap(num_ex_colour_map_name)\n error_checking.assert_is_geq(max_colour_percentile, 90.)\n error_checking.assert_is_leq(max_colour_percentile, 100.)\n\n grid_metafile_name = grids.find_equidistant_metafile(\n directory_name=evaluation_dir_name, raise_error_if_missing=True\n )\n\n print('Reading grid metadata from: \"{0:s}\"...'.format(grid_metafile_name))\n grid_metadata_dict = grids.read_equidistant_metafile(grid_metafile_name)\n print(SEPARATOR_STRING)\n\n num_grid_rows = len(grid_metadata_dict[grids.Y_COORDS_KEY])\n num_grid_columns = len(grid_metadata_dict[grids.X_COORDS_KEY])\n\n auc_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n csi_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n pod_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n far_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n num_examples_matrix = numpy.full(\n (num_grid_rows, num_grid_columns), 0, dtype=int\n )\n num_positive_examples_matrix = numpy.full(\n (num_grid_rows, num_grid_columns), 0, dtype=int\n )\n\n for i in range(num_grid_rows):\n for j in range(num_grid_columns):\n this_eval_file_name = model_eval.find_file(\n directory_name=evaluation_dir_name, grid_row=i, grid_column=j,\n raise_error_if_missing=False)\n\n if not os.path.isfile(this_eval_file_name):\n warning_string = (\n 'Cannot find file (this may or may not be a problem). '\n 'Expected at: \"{0:s}\"'\n ).format(this_eval_file_name)\n\n warnings.warn(warning_string)\n continue\n\n print('Reading data from: \"{0:s}\"...'.format(this_eval_file_name))\n this_evaluation_dict = model_eval.read_evaluation(\n this_eval_file_name)\n\n num_examples_matrix[i, j] = len(\n this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY]\n )\n num_positive_examples_matrix[i, j] = numpy.sum(\n this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY]\n )\n\n this_evaluation_table = this_evaluation_dict[\n model_eval.EVALUATION_TABLE_KEY]\n\n auc_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.AUC_KEY].values\n )\n csi_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.CSI_KEY].values\n )\n pod_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.POD_KEY].values\n )\n far_matrix[i, j] = 1. 
- numpy.nanmean(\n this_evaluation_table[model_eval.SUCCESS_RATIO_KEY].values\n )\n\n print(SEPARATOR_STRING)\n\n auc_matrix[num_positive_examples_matrix == 0] = numpy.nan\n csi_matrix[num_positive_examples_matrix == 0] = numpy.nan\n pod_matrix[num_positive_examples_matrix == 0] = numpy.nan\n far_matrix[num_positive_examples_matrix == 0] = numpy.nan\n\n if smoothing_radius_grid_cells is not None:\n print((\n 'Applying Gaussian smoother with e-folding radius of {0:.1f} grid '\n 'cells...'\n ).format(\n smoothing_radius_grid_cells\n ))\n\n orig_num_examples_matrix = num_examples_matrix + 0\n num_examples_matrix = general_utils.apply_gaussian_filter(\n input_matrix=num_examples_matrix.astype(float),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n num_examples_matrix = numpy.round(num_examples_matrix).astype(int)\n num_examples_matrix[orig_num_examples_matrix == 0] = 0 # HACK\n\n num_positive_examples_matrix = general_utils.apply_gaussian_filter(\n input_matrix=num_positive_examples_matrix.astype(float),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n num_positive_examples_matrix = (\n numpy.round(num_positive_examples_matrix).astype(int)\n )\n num_positive_examples_matrix[num_examples_matrix == 0] = 0\n\n auc_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(auc_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n csi_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(csi_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n pod_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(pod_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n far_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(far_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n\n auc_matrix[num_positive_examples_matrix == 0] = numpy.nan\n csi_matrix[num_positive_examples_matrix == 0] = numpy.nan\n pod_matrix[num_positive_examples_matrix == 0] = numpy.nan\n far_matrix[num_positive_examples_matrix == 0] = numpy.nan\n\n panel_file_names = []\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name)\n\n # Plot number of examples.\n this_data_matrix = numpy.maximum(numpy.log10(num_examples_matrix), 0.)\n this_data_matrix[this_data_matrix == 0] = numpy.nan\n max_colour_value = numpy.nanpercentile(\n this_data_matrix, max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=num_ex_colour_map_object,\n min_colour_value=0., max_colour_value=max_colour_value,\n plot_cbar_min_arrow=False, plot_cbar_max_arrow=True, log_scale=True)\n\n axes_object.set_title(r'Number of examples')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(a)')\n\n panel_file_names.append('{0:s}/num_examples.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot number of positive examples.\n this_data_matrix = num_positive_examples_matrix.astype(float)\n this_data_matrix[this_data_matrix == 0] = numpy.nan\n\n max_colour_value = numpy.nanpercentile(\n this_data_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n this_data_matrix, 100. 
- max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=num_ex_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=True, plot_cbar_max_arrow=True)\n\n axes_object.set_title('Number of tornadic examples')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(b)')\n\n panel_file_names.append(\n '{0:s}/num_positive_examples.jpg'.format(output_dir_name)\n )\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot AUC.\n max_colour_value = numpy.nanpercentile(auc_matrix, max_colour_percentile)\n min_colour_value = numpy.maximum(\n numpy.nanpercentile(auc_matrix, 100. - max_colour_percentile),\n 0.5\n )\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=auc_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=True, plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('AUC (area under ROC curve)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(c)')\n\n panel_file_names.append('{0:s}/auc.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot CSI.\n max_colour_value = numpy.nanpercentile(csi_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n csi_matrix, 100. - max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=csi_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('CSI (critical success index)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(d)')\n\n panel_file_names.append('{0:s}/csi.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot POD.\n max_colour_value = numpy.nanpercentile(pod_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n pod_matrix, 100. 
- max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=pod_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('POD (probability of detection)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(e)')\n\n panel_file_names.append('{0:s}/pod.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot FAR.\n max_colour_value = numpy.nanpercentile(far_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n far_matrix, 100. - max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=far_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('FAR (false-alarm ratio)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(f)')\n\n panel_file_names.append('{0:s}/far.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Concatenate panels.\n concat_file_name = '{0:s}/spatially_subset_evaluation.jpg'.format(\n output_dir_name)\n print('Concatenating panels to: \"{0:s}\"...'.format(concat_file_name))\n\n imagemagick_utils.concatenate_images(\n input_file_names=panel_file_names, output_file_name=concat_file_name,\n num_panel_rows=NUM_PANEL_ROWS, num_panel_columns=NUM_PANEL_COLUMNS)\n\n imagemagick_utils.resize_image(\n input_file_name=concat_file_name, output_file_name=concat_file_name,\n output_size_pixels=CONCAT_FIGURE_SIZE_PX)", "def test_suite():\n test(sum_of_squares([2, 3, 4]) == 29)\n test(sum_of_squares([ ]) == 0)\n test(sum_of_squares([2, -3, 4]) == 29)", "def main():\n graph_alg_eq()\n graph_points()\n graph_smooth_from_pts()\n\n return GOOD_RET # success", "def _executeScheme (self, x, y, startx, starty, key, wallfunc=None, ceilingedgefunc=None, ceilingfunc=None, rooffunc=None):\r\n\r\n if key in self.scheme:\r\n for op in self.scheme[key]:\r\n if op[0] == \"W\" and wallfunc:\r\n wallfunc(x*self.width + startx, self.yoff, y*self.depth + starty,\r\n self.width, self.depth, self.height,\r\n self.details[op[1]], mergeshape=self.model[op[1]])\r\n elif op[0] == \"C\" and ceilingfunc:\r\n ceilingfunc(x*self.width + startx, self.yoff, y*self.depth + starty,\r\n self.width, self.depth, self.height,\r\n self.details[op[1]], mergeshape=self.model[op[1]])\r\n elif op[0] == \"R\" and rooffunc:\r\n rooffunc(x*self.width + startx, self.yoff, y*self.depth + starty,\r\n self.width, self.depth, self.height,\r\n self.details[op[1]], mergeshape=self.model[op[1]])\r\n elif op[0] == \"CE\" and ceilingedgefunc:\r\n ceilingedgefunc(x*self.width + startx, self.yoff, y*self.depth + starty,\r\n self.width, self.depth, self.height,\r\n self.details[op[1]], mergeshape=self.model[op[1]])", "def process(self, lists, subqueries):\n pass", "def test_merge_two_two():\n 
run_merge([1, 3], [2, 4], [1, 2, 3, 4])", "def __call__(self, *columns, test_size, random_state=42):\n # print(quad_size, columns) if print is to stay, then use logging\n w = self.quad_size\n dataset = columns[1]\n print(self.quad_size)\n if w == 0:\n train_ids, test_ids = train_test_split([i for i in range(len(dataset))], test_size=test_size, random_state=random_state)\n else:\n r = np.random.RandomState(random_state)\n d_lon = r.random_sample()\n r_lat = np.random.RandomState(random_state + 10)\n d_lat = r_lat.random_sample()\n\n proj = OrderedDict()\n print('ici')\n for i, coor in enumerate(dataset):\n lon, lat = self._project(coor[0], coor[1])\n proj_lon = (lon + d_lon * w) // w\n proj_lat = (lat + d_lat * w) // w\n if (proj_lon, proj_lat) in proj:\n proj[(proj_lon, proj_lat)].append(i)\n else:\n proj[(proj_lon, proj_lat)] = [i]\n\n train_map, test_map = train_test_split(list(proj.keys()), test_size=test_size, random_state=random_state)\n\n train_ids = [i for quadra in train_map for i in proj[quadra]]\n test_ids = [i for quadra in test_map for i in proj[quadra]]\n # creating output\n r = []\n for col in columns:\n r.extend((col[train_ids], col[test_ids]))\n return r", "def squarePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n for edges in SQUARE[\"EDGES\"]:\n for edge in edges:\n\n point = OpenMaya.MVector(edge[0], edge[1], edge[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n \n \n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n for polygons in SQUARE[\"POLYGONS\"]:\n for polygon in polygons:\n\n point = OpenMaya.MVector(polygon[0], polygon[1], polygon[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def join(self):\n pass", "def test_pooling(self):\n for width in range(2, 5):\n for width2 in range(1, width):\n matrix_size = (4, 5, width)\n matrix = get_random_test_tensor(size=matrix_size)\n pool_size = width2\n for stride in range(1, width2):\n for padding in range(2):\n reference = torch.nn.functional.avg_pool2d(\n matrix.unsqueeze(0), pool_size,\n stride=stride, padding=padding\n )\n\n encrypted_matrix = SharedTensor(matrix)\n encrypted_pool = encrypted_matrix.avg_pool2d(\n pool_size, stride=stride, padding=padding)\n self._check(\n encrypted_pool, reference[0], 'avg_pool2d failed')", "def run(self):\n\t\t(conn, curs) = db_connect(self.hostname, self.dbname, self.schema)\n\t\t\n\t\t\n\t\t#e_splat_table = self.table+'e'\n\t\t#e_mcl_table = self.mcl_table+'e'\n\t\t#CoexprFromCooccu_instance = CoexprFromCooccu()\n\t\t#pre_2nd_cc_hierarchy = CoexprFromCooccu_instance.data_fetch(curs, self.mcl_table, e_mcl_table)\n\t\t#mcl_id2cluster_dstructure = self.data_fetch(curs, self.table, self.mcl_table, crs_no=1)\n\t\t#mcl_id_2nd_order2cluster_dstructure = 
self.data_fetch(curs, e_splat_table, e_mcl_table, crs_no=2)\n\t\t#self.cluster_dstructure_output_with_both_hierarchy(curs, self.output_fname, pre_2nd_cc_hierarchy,\\\n\t\t#\tmcl_id2cluster_dstructure, mcl_id_2nd_order2cluster_dstructure)\n\t\t#self.cluster_dstructure_output(curs, self.output_fname, self.order_1st_id2all_clusters)\n\t\tself.data_fetch(curs, self.table, self.mcl_table, crs_no=1, output_fname=self.output_fname)", "def square():\r\n for _ in range(4):\r\n t.fd(200)\r\n t.rt(90)", "def _mutualSimplify(data1, data2):\n\n if len(data1) != 3:\n data1 = numpy.transpose(data1)\n if len(data1) != 3:\n raise ValueError(\"Wrong dimensions of data\")\n if len(data2) != 3:\n data2 = numpy.transpose(data2)\n if len(data2) != 3:\n raise ValueError(\"Wrong dimensions of data\")\n\n datax1 = numpy.array(data1[0], float, order=\"C\")\n datay1 = numpy.array(data1[1], float, order=\"C\")\n dataz1 = numpy.array(data1[2], float, order=\"C\")\n\n datax2 = numpy.array(data2[0], float, order=\"C\")\n datay2 = numpy.array(data2[1], float, order=\"C\")\n dataz2 = numpy.array(data2[2], float, order=\"C\")\n\n N1 = len(datax1)\n N2 = len(datax2)\n\n ret = numpy.array([1, 1])\n datax1, datay1, dataz1, datax2, datay2, dataz2, N1, N2 # eclipse warning removal\n code = r\"\"\"\n #line 264 \"binary_search.py\"\n int M = 0;\n int sum = 0;\n int t=0,s=0,k=0, k1;\n int turn=0;\n bool breakflag;\n\n int a;\n position1=vector<point>(N1);\n newposition1=vector<point>(N1);\n\n position2=vector<point>(N2);\n newposition2=vector<point>(N2);\n\n\n for (i=0;i<N1;i++)\n {\n position1[i].x = datax1[i] + 0.000000000000001*(rand()%1000);\n position1[i].y = datay1[i] +0.00000000000000001*(rand()%1000);\n position1[i].z = dataz1[i] + 0.00000000000000001*(rand()%1000);\n }\n\n for (i=0;i<N2;i++)\n {\n position2[i].x = datax2[i] + 0.000000000000001*(rand()%1000);\n position2[i].y = datay2[i] +0.0000000000000000001*(rand()%1000);\n position2[i].z = dataz2[i] + 0.0000000000000000001*(rand()%1000);\n }\n\n todelete1 = vector <int> (N1);\n todelete2 = vector <int> (N2);\n\n for (i=0;i<N1;i++) todelete1[i] == -2;\n for (i=0;i<N2;i++) todelete2[i] == -2;\n\n for (int ttt = 0; ttt < 1; ttt++)\n {\n turn++;\n M=0;\n for (i=0;i<N1;i++) todelete1[i] = -2;\n for (i=0;i<N2;i++) todelete2[i] = -2;\n\n for (int j=1;j<N1-1;j++) //going over all elements trying to delete\n {\n\n breakflag = false; //by default we delete thing\n for (k=0;k<N1;k++) //going over all triangles to check\n {\n if (k < j-2 || k > j+1)\n {\n if (k < N1 - 1) k1 = k + 1;\n else k1 = 0;\n sum = intersect(position1[j-1],position1[j],position1[\n j+1],position1[k],position1[k1]);\n if (sum!=0)\n {\n //printf(\"intersection at %d,%d\\n\",j,k);\n breakflag = true; //keeping thing\n break;\n }\n }\n }\n\n if (breakflag == false)\n {\n for (k=0;k<N2;k++) //going over all triangles to check\n {\n if (k < N2 - 1) k1 = k + 1;\n else k1 = 0;\n sum = intersect(position1[j-1],position1[j],position1[\n j+1],position2[k],position2[k1]);\n if (sum!=0)\n {\n //printf(\"crossintersection at %d,%d\\n\",j,k);\n breakflag = true; //keeping thing\n break;\n }\n }\n }\n\n if (breakflag ==false)\n {\n todelete1[M++] = j;\n position1[j] = (position1[j-1] + position1[j+1])* 0.5;\n //printf(\"%d will be deleted at %d\\n\",j,k);\n j++;\n //break;\n }\n }\n t = 0;//pointer for todelete\n s = 0;//pointer for newposition\n if (M==0)\n {\n break;\n }\n for (int j=0;j<N1;j++)\n {\n if (todelete1[t] == j)\n {\n t++;\n continue;\n }\n else\n {\n newposition1[s++] = position1[j];\n }\n }\n N1 = s;\n M = 
0;\n t = 0;\n position1 = newposition1;\n }\n\n ret[0] = N1;\n ret[1] = N2;\n\n for (i=0;i<N1;i++)\n {\n datax1[i] = position1[i].x;\n datay1[i] = position1[i].y;\n dataz1[i] = position1[i].z;\n }\n for (i=0;i<N2;i++)\n {\n datax2[i] = position2[i].x;\n datay2[i] = position2[i].y;\n dataz2[i] = position2[i].z;\n }\n\n \"\"\"\n support = r\"\"\"\n#line 415 \"binary_search.py\"\n#include <cstdlib>\n#include <iostream>\n#include <iomanip>\n#include <cmath>\n#include <vector>\n#include <ctime>\n#include <omp.h>\n#include <stdio.h>\nusing namespace std;\nstruct point{\n double x,y,z;\n point operator + (const point &p) const {\n return (point) {x+p.x, y+p.y, z+p.z};\n }\n point operator - (const point &p) const {\n return (point) {x-p.x, y-p.y, z-p.z};\n }\n/* cross product */\n point operator * (const point &p) const {\n return (point) {y*p.z - z*p.y,\n z*p.x - x*p.z,\n x*p.y - y*p.x};\n }\n point operator * (const double &d) const {\n return (point) {d*x, d*y, d*z};\n }\n\n point operator / (const double &d) const {\n return (point) {x/d, y/d, z/d};\n }\n};\n\nvector <point> position1;\nvector <point> newposition1;\nvector <int> todelete1;\nint N1;\nvector <point> position2;\nvector <point> newposition2;\nvector <int> todelete2;\nint N2;\n\n\nint i;\ndouble dist1(int i,int j);\ndouble dist2(int i,int j);\ndouble dotProduct(point a,point b);\nint intersect(point t1,point t2,point t3,point r1,point r2);\n\ninline double sqr(double x){\n return x*x;\n}\ninline double dist1(int i,int j){\nreturn sqrt(dotProduct((position1[i]-position1[j]),(position1[i]-position1[j])));\n}\n\ninline double dist2(int i,int j){\nreturn sqrt(dotProduct((position2[i]-position2[j]),(position2[i]-position2[j])));\n}\n\ninline double dist(point a,point b){\n return sqr(a.x-b.x)+sqr(a.y-b.y)+sqr(a.z-b.z);\n}\n\ninline double dotProduct(point a,point b){\n return a.x*b.x+a.y*b.y+a.z*b.z;\n}\n\nint intersect(point t1,point t2,point t3,point r1,point r2)\n{\npoint A,B,C,D,n;\nint r;\ndouble det,t,u,v,c1,d1,d2,d3;\nB = t2 - t1;\nC = t3 - t1;\nD = r2 - t1;\nA = r2 - r1;\n\nd1 = (B.y*C.z-C.y*B.z);\nd2 = (B.x*C.z-B.z*C.x);\nd3 = (B.x*C.y-C.x*B.y);\ndet = A.x*d1-A.y*d2+A.z*d3;\nif (det == 0) return 0;\nif (det >0){\nt = D.x*d1-D.y*d2+D.z*d3;\nif (t<0 || t>det) return 0;\nu = A.x*(D.y*C.z-C.y*D.z)-A.y*(D.x*C.z-D.z*C.x)+A.z*(D.x*C.y-C.x*D.y);\nif (u<0 || u>det) return 0;\nv = A.x*(B.y*D.z-D.y*B.z)-A.y*(B.x*D.z-B.z*D.x)+A.z*(B.x*D.y-D.x*B.y);\nif (v<0 || v>det || (u+v)>det) return 0;\n//printf(\"\\n%lf,%lf,%lf, \",t/det,u/det,v/det);\nn = B*C;\nc1 = dotProduct(r1-t1,n);\nif (c1>0) return 1;\nelse return -1;\n}\nelse{\nt = D.x*d1-D.y*d2+D.z*d3;\nif (t>0 || t<det) return 0;\nu = A.x*(D.y*C.z-C.y*D.z)-A.y*(D.x*C.z-D.z*C.x)+A.z*(D.x*C.y-C.x*D.y);\nif (u>0 || u<det) return 0;\nv = A.x*(B.y*D.z-D.y*B.z)-A.y*(B.x*D.z-B.z*D.x)+A.z*(B.x*D.y-D.x*B.y);\nif (v>0 || v<det || (u+v)<det) return 0;\n//printf(\"\\n%lf,%lf,%lf, \",t/det,u/det,v/det);\nn = B*C;\nc1 = dotProduct(r1-t1,n);\nif (c1>0) return 1;\nelse return -1;\n}\n}\n//DNA conformation\n\"\"\"\n from scipy import weave\n weave.inline(code, ['datax1', 'datay1', 'dataz1', 'N1',\n 'datax2', 'datay2', 'dataz2', 'N2', 'ret'],\n extra_compile_args=['-malign-double'], support_code=support)\n\n data1 = numpy.array([datax1, datay1, dataz1]).T\n data2 = numpy.array([datax2, datay2, dataz2]).T\n\n return data1[:ret[0]], data2[:ret[1]]", "def dot(v,w):\n return sum(v_i * w_i for v_i, w_i in zip(v,w)\n\ndef sum_of_squares(v):\n return dot(v, v)\n\nimport math", "def chessboardGame(x, y):\n xin = x\n 
yin = y\n\n # These squares have no possible move, therefore, are losing;\n # we chose these squares by sight; while loop below expands these sets\n # until we encompass whole board\n # it was not clear to me in the beginning that every square has a unique\n # determinant ending under optimal play\n losing_start = set([(1, 1), (2, 1), (1, 2), (2, 2)])\n\n # These squares can jump to losing_start in one move, so are winning\n winning_start = set([(1, 3), (1, 4), (2, 3), (2, 4),\n (3, 1), (3, 2), (3, 3), (3, 4),\n (4, 1), (4, 2), (4, 3)])\n\n def nextset(x, y):\n def isvalid(coord):\n return True if coord[0] >= 1 and coord[1] >= 1 \\\n and coord[0] <= 15 and coord[1] <= 15 else False\n\n nextsquares = [(x - 2, y + 1), (x - 2, y - 1), (x + 1, y - 2),\n (x - 1, y - 2)]\n nextsquares = set([*filter(isvalid, nextsquares)])\n # print(nextsquares)\n return nextsquares\n\n # run a few times through whole board;\n # it takes 5 times to find a definitive win path for all 225 squares\n # 161 squares are winning for first player\n # 64 squares are losing starting for first player\n test_set = [(i, j) for i in range(1, 16) for j in range(1, 16)]\n times = 1\n while (len(winning_start) + len(losing_start)) < 225:\n for coords in test_set:\n x_ = coords[0]\n y_ = coords[1]\n thenextset = nextset(x_, y_)\n # print('testing', x_, y_, thenextset)\n\n if (x_, y_) in losing_start:\n # print('No Path, Second wins')\n pass\n elif (x_, y_) in winning_start:\n # print('One jump to terminal square, First wins')\n pass\n elif (len(winning_start.intersection(thenextset))\n == len(thenextset)):\n # if next set ONLY includes winning_starts, First loses because\n # he has no choice but give win to opponent\n # need to add x,y to losing_start\n losing_start.add((x_, y_))\n # print('we lose, Second wins')\n elif len(losing_start.intersection(thenextset)) > 0:\n # if next set includes ANY losing_start, we win by choosing it\n # need to add x,y to winning_start\n winning_start.add((x_, y_))\n # print('First wins')\n else:\n # print('do not know')\n pass\n\n print('Run', times, len(winning_start) + len(losing_start))\n times += 1\n\n print(len(winning_start))\n print(len(losing_start))\n\n # prints schematic of Winor Loss of each of 15x15 squares\n\n print(' '.join(map(str, [i for i in range(1, 16)])))\n for i in range(15):\n row = ''\n for j in range(15):\n if test_set[i * 15 + j] in winning_start:\n row = row + 'W '\n else:\n row = row + 'L '\n print(row + str(i))\n\n if (xin, yin) in winning_start:\n print('First wins with', xin, yin)\n return 'First'\n else:\n print('Second wins with', xin, yin)\n return 'Second'", "def build_fireball():\n # build the right part\n build_rightpart()\n\n # copy it to 4.\n copy(0, 4)\n\n # build the left part, now it's in 0\n build_leftpart()\n\n # copy right part from 4 to 1.\n copy(4, 1)\n # smash together for whole fireball.\n smash()", "def test_combine_pointers(workers):\n\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n x = th.tensor([1, 2, 3, 4, 5]).send(bob)\n y = th.tensor([1, 2, 3, 4, 5]).send(alice)\n\n a = sy.combine_pointers(*[x, y])\n b = a + a\n\n c = b.get(sum_results=True)\n assert (c == th.tensor([4, 8, 12, 16, 20])).all()\n\n b = a + a\n c = b.get(sum_results=False)\n assert len(c) == 2\n assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all", "def RUN():\n TABLE_info = get_TABLE_info()\n m1_info, m2_info, m3_info, m4_info = get_material_info(TABLE_info)\n\n TSUGITE_name, SHIGUCHI_name, offset = ask_KUMIKI()\n\n # TSUGITE_list = [m2_left_list, m2_right_list, 
m3_left_list, m3_right_list]\n TSUGITE_list, m2_SEN_info, m3_SEN_info = make_TSUGITE_list(TSUGITE_name, m2_info, m3_info, m4_info, offset)\n\n # SHIGUCHI_list = [m2_KUMIKI_points1, m2_KUMIKI_points2, m3_KUMIKI_points1, m3_KUMIKI_points2]\n SHIGUCHI_list = make_SHIGUCHI_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n\n # m1\n m1_male_points_list, m1_male_SEN_info = make_male_m1_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n m1_female_points_list, m1_female_SEN_info = make_female_m1_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n\n m1_male_crvs = make_m1_male_crv(m1_male_points_list)\n m1_female_crvs = make_m1_female_crv(m1_female_points_list)\n\n # m2, m3 crvs\n m2_male_left_crvs, m2_male_right_crvs = make_m2_crv(TSUGITE_list, SHIGUCHI_list)\n m3_male_left_crvs, m3_male_right_crvs = make_m3_crv(TSUGITE_list, SHIGUCHI_list)\n\n m2_female_left_crvs = rs.CopyObjects(m2_male_left_crvs)\n m2_female_right_crvs = rs.CopyObjects(m2_male_right_crvs)\n m3_female_left_crvs = rs.CopyObjects(m3_male_left_crvs)\n m3_female_right_crvs = rs.CopyObjects(m3_male_right_crvs)\n\n # m4\n m4_male_points_list, m4_male_SEN_info = make_male_m4_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n m4_female_points_list, m4_female_SEN_info = make_female_m4_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n\n m4_male_crvs = make_m4_male_crv(m4_male_points_list)\n m4_female_crvs = make_m4_female_crv(m4_female_points_list)\n\n make_SEN_crvs(m1_male_SEN_info, m1_female_SEN_info, m2_SEN_info, m3_SEN_info, m4_male_SEN_info, m4_female_SEN_info, offset)\n\n # Make 3D\n male_models = make_male_3D_model\\\n (TABLE_info, m1_male_crvs, m2_male_left_crvs, m2_male_right_crvs,\\\n m3_male_left_crvs, m3_male_right_crvs, m4_male_crvs)\n\n female_models = make_female_3D_model\\\n (TABLE_info, m1_female_crvs, m2_female_left_crvs, m2_female_right_crvs,\\\n m3_female_left_crvs, m3_female_right_crvs, m4_female_crvs)\n\n # Deploy crvs (processing data)\n deploy_male_crvs\\\n (TABLE_info, m1_male_crvs, m2_male_left_crvs, m2_male_right_crvs,\\\n m3_male_left_crvs, m3_male_right_crvs, m4_male_crvs)\n\n deploy_female_crvs\\\n (TABLE_info, m1_female_crvs, m2_female_left_crvs, m2_female_right_crvs,\\\n m3_female_left_crvs, m3_female_right_crvs, m4_female_crvs)\n\n make_board(TABLE_info)\n\n rs.ZoomExtents()\n pass", "def _add_splines(c, b1, d, b2):\n if b1 == S.Zero or c == S.Zero:\n rv = piecewise_fold(d * b2)\n elif b2 == S.Zero or d == S.Zero:\n rv = piecewise_fold(c * b1)\n else:\n new_args = []\n n_intervals = len(b1.args)\n # Just combining the Piecewise without any fancy optimization\n p1 = piecewise_fold(c * b1)\n p2 = piecewise_fold(d * b2)\n\n # Search all Piecewise arguments except (0, True)\n p2args = list(p2.args[:-1])\n\n # This merging algorithm assumes the conditions in\n # p1 and p2 are sorted\n for arg in p1.args[:-1]:\n # Conditional of Piecewise are And objects\n # the args of the And object is a tuple of two\n # Relational objects the numerical value is in the .rhs\n # of the Relational object\n expr = arg.expr\n cond = arg.cond\n\n lower = cond.args[0].rhs\n\n # Check p2 for matching conditions that can be merged\n for i, arg2 in enumerate(p2args):\n expr2 = arg2.expr\n cond2 = arg2.cond\n\n lower_2 = cond2.args[0].rhs\n upper_2 = cond2.args[1].rhs\n\n if cond2 == cond:\n # Conditions match, join expressions\n expr += expr2\n # Remove matching element\n del p2args[i]\n # No need to check the rest\n break\n elif lower_2 < lower and upper_2 <= 
lower:\n # Check if arg2 condition smaller than arg1,\n # add to new_args by itself (no match expected\n # in p1)\n new_args.append(arg2)\n del p2args[i]\n break\n\n # Checked all, add expr and cond\n new_args.append((expr, cond))\n\n # Add remaining items from p2args\n new_args.extend(p2args)\n\n # Add final (0, True)\n new_args.append((0, True))\n\n rv = Piecewise(*new_args)\n\n return rv.expand()", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def dojoin(ipath1,ipath2,opath):\n r1 = '%s.map' % ipath1\n r2 = '%s.map' % ipath2\n if not mapsMatch(r1,r2):\n print '### maps %s and %s do not match' % (r1,r2)\n sys.exit(1)\n outpath = '%s.map' % opath\n shutil.copyfile(r1,outpath)\n r1 = '%s.eigenstratgeno' % ipath1\n r2 = '%s.eigenstratgeno' % ipath2\n outpath = '%s.eigenstratgeno' % opath\n joinRows(r1,r2,outpath)\n outpath = '%s.ind' % opath\n r1 = '%s.ind' % ipath1\n r2 = '%s.ind' % ipath2\n joinInds(r1,r2,outpath)", "def squareSearch( self, tTopLeft, tBottomRight, function, argsList ): #by LOQ\n\t\ttPaintedList = []\n\t\tresult = None\n\t\tfor x in range(tTopLeft[0], tBottomRight[0]+1):\n\t\t\tfor y in range(tTopLeft[1], tBottomRight[1]+1, -1): # edead: added -1, not sure why it didn't work before\n\t\t\t\tresult, bPaintPlot, bContinueSearch = function((x, y), result, argsList)\n\t\t\t\tif bPaintPlot: # paint plot\n\t\t\t\t\ttPaintedList.append((x, y))\n\t\t\t\tif not bContinueSearch: # goal reached, so stop\n\t\t\t\t\treturn result, tPaintedList\n\t\treturn result, tPaintedList", "def square(i, j):\n return map(sq_start, [i, j, i + 1, j + 1])", "def shp_join(filein, folderin, folderout, folderno):\n def performSpatialJoin(base_vec, base_lyr, join_vec, join_lyr, output_vec, output_lyr):\n import geopandas\n # Must have rtree installed - otherwise error \"geopandas/tools/sjoin.py\"\n # AttributeError: 'NoneType' object has no attribute 'intersection'\n base_gpd_df = geopandas.read_file(base_vec)\n join_gpg_df = geopandas.read_file(join_vec)\n \n join_gpg_df = geopandas.sjoin(base_gpd_df, join_gpg_df, how=\"inner\", op=\"within\")\n join_gpg_df.to_file(output_vec)\n\n def run_join(params):\n base_vec = params[0]\n join_vec = params[1]\n output_vec = params[2]\n performSpatialJoin(base_vec, '', join_vec, '', output_vec, '')\n \n split_files = glob.glob(folderin.format(folderno))\n\n\n params = []\n for filename in split_files:\n basename = os.path.splitext(os.path.basename(filename))[0]\n output_file = os.path.join(folderout, \"{}_join.shp\".format(basename))\n params.append([filename, filein, output_file])\n\n\n ncores = 50\n p = Pool(ncores)\n p.map(run_join, params)", "def join(self, blockOne: ghidra.program.model.mem.MemoryBlock, blockTwo: ghidra.program.model.mem.MemoryBlock) -> ghidra.program.model.mem.MemoryBlock:\n ...", "def undrawGrid(draw,points,coeff2,newColorArray):\r\n ## This is the merge function.\r\n ## If two neighboring regions are considered to be the same color by the comparison function then this function replaces the black line between them by a line of their color (making it invisible).\r\n for j in range(0,coeff2-1):\r\n if comparisonFunction(newColorArray[0][j],newColorArray[0][j+1]):\r\n draw.line((points*0, 
points*(j+1), points*(0+1)-3, points*(j+1)), fill=newColorArray[0][j], width=lineWidth)\r\n\r\n for i in range(0,coeff2-1):\r\n if comparisonFunction(newColorArray[i][0],newColorArray[i+1][0]):\r\n draw.line((points*(i+1), points*0, points*(i+1), points*1-3), fill=newColorArray[i][0], width=lineWidth)\r\n\r\n for i in range(1,coeff2):\r\n for j in range(1,coeff2):\r\n if comparisonFunction(newColorArray[i][j],newColorArray[i][j-1]):\r\n draw.line((points*i+3, points*j, points*(i+1)-3, points*j), fill=newColorArray[i][j], width=lineWidth)\r\n\r\n if comparisonFunction(newColorArray[i][j],newColorArray[i-1][j]):\r\n draw.line((points*i, points*j+3, points*i, points*(j+1)-3), fill=newColorArray[i][j], width=lineWidth)", "def main():\n subjectlist = ['hel{}'.format(i) for i in range(1, 20) if i is not 9]\n logfile = setup_log(os.path.join(os.environ['hel'], 'logs',\n 'randomise_setup_fslmerge'))\n logfile.info('Setup for randomise.')\n logfile.info('Making a 4D data set by combining images')\n outdir = os.path.join(os.environ['hel'], 'graph_analyses',\n 'randomise_global_connectivity')\n for subclust_n in range(1, 4):\n outfilename = os.path.join(outdir,\n 'knnward_clst1_subclust{}_4Dfile'.format(\n subclust_n))\n mergefsl(logfile, make_file_list(subjectlist, subclust_n), outfilename)", "def run(self):\n\t\tfrom loc import loc as Loc\n\t\tfor r in range(1,self.size):\n\t\t\tfor c in range(self.size): \n\t\t\t\tthis = Loc(r,c)\n\t\t\t\tself.state.set_cell(this, self.rule(self.neighbor_vals(this), self.__prob))\n\t\tself.__ran = True", "def test_multisize():\r\n sdfg = dace.SDFG('test')\r\n N = dace.symbol('N')\r\n sdfg.add_transient('A', [N], dace.float64)\r\n sdfg.add_array('__return', [1], dace.float64)\r\n sdfg.add_symbol('cond', dace.uint64)\r\n\r\n init = sdfg.add_state()\r\n state1 = sdfg.add_state()\r\n state2 = sdfg.add_state()\r\n cnvrg = sdfg.add_state()\r\n state21 = sdfg.add_state()\r\n state22 = sdfg.add_state()\r\n final = sdfg.add_state()\r\n sdfg.add_edge(init, state1, dace.InterstateEdge('cond == 1', assignments=dict(N=1)))\r\n sdfg.add_edge(init, state2, dace.InterstateEdge('cond != 1', assignments=dict(N=2)))\r\n sdfg.add_edge(state1, cnvrg, dace.InterstateEdge())\r\n sdfg.add_edge(state2, cnvrg, dace.InterstateEdge())\r\n sdfg.add_edge(cnvrg, state21, dace.InterstateEdge('cond == 0'))\r\n sdfg.add_edge(cnvrg, state22, dace.InterstateEdge('cond != 0'))\r\n sdfg.add_edge(state21, final, dace.InterstateEdge())\r\n sdfg.add_edge(state22, final, dace.InterstateEdge())\r\n\r\n t = state21.add_tasklet('firstset', {}, {'o'}, 'o = 5')\r\n w = state21.add_write('A')\r\n state21.add_edge(t, 'o', w, None, dace.Memlet('A[0]'))\r\n\r\n t = state22.add_tasklet('secondset', {}, {'o'}, 'o = 6')\r\n w = state22.add_access('A')\r\n state22.add_edge(t, 'o', w, None, dace.Memlet('A[0]'))\r\n\r\n r = final.add_read('A')\r\n t = final.add_tasklet('writeout', {'a'}, {'b'}, 'b = a')\r\n w = final.add_write('__return')\r\n final.add_edge(r, None, t, 'a', dace.Memlet('A[0]'))\r\n final.add_edge(t, 'b', w, None, dace.Memlet('__return[0]'))\r\n\r\n # Make sure array is allocated once\r\n code = sdfg.generate_code()[0].clean_code\r\n assert code.count('new double') == 1\r\n assert code.count('delete[]') == 1\r\n\r\n res1 = sdfg(cond=0)\r\n res2 = sdfg(cond=1)\r\n\r\n assert np.allclose(res1, 5)\r\n assert np.allclose(res2, 6)", "def matrix_2bound(self,fun):\n xpmin = np.hstack((self.xlims[0,:],self.plims[0,:]))\n xpmax = np.hstack((self.xlims[1,:],self.plims[1,:]))\n Nxp = self.n+self.n_p\n xps = 
np.random.uniform(xpmin,xpmax,size=(self.Nx,Nxp))\n xs,ps,_ = np.hsplit(xps,np.array([self.n,Nxp]))\n mat_over_out = 0\n for k in range(self.Nx):\n Mat = fun(xs[k,:],ps[k,:])\n mat_over = np.linalg.norm(Mat,ord=2)\n if mat_over > mat_over_out:\n mat_over_out = mat_over\n return mat_over_out", "def _schedule_winograd(cfg, s, op):\n # get ops and tensors\n output = op.output(0)\n\n Y = op.input_tensors[0]\n M, A = s[Y].op.input_tensors\n U, V = s[M].op.input_tensors\n d, B = s[V].op.input_tensors\n data_pad = s[d].op.input_tensors[0]\n\n # padding\n s[data_pad].compute_inline()\n\n # transform kernel\n if isinstance(U.op, tvm.te.ComputeOp):\n kernel, G = s[U].op.input_tensors\n s[G].compute_inline()\n (eps, nu, co, ci, vco) = s[U].op.axis\n if not autotvm.GLOBAL_SCOPE.in_tuning:\n r_kh, r_kw = s[U].op.reduce_axis\n s[U].reorder(co, ci, eps, nu, r_kh, r_kw, vco)\n _ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]\n s[U].vectorize(vco)\n tile_and_bind(s, U, co, ci, 1, 256)\n\n # dilation\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n\n # transform image\n s[B].compute_inline()\n VL = s.cache_write(V, \"local\")\n\n eps, nu, p, ci, vp = s[V].op.axis\n s[V].reorder(p, ci, eps, nu, vp)\n for axis in [eps, nu]:\n s[V].unroll(axis)\n s[V].vectorize(vp)\n fused = s[V].fuse(p, ci)\n\n bb, tt = cfg[\"tile_t1\"].apply(s, V, fused)\n s[V].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[V].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n eps, nu, p, ci, vp = s[VL].op.axis\n r_a, r_b = s[VL].op.reduce_axis\n for axis in [eps, nu, r_a, r_b]:\n s[VL].unroll(axis)\n s[VL].vectorize(vp)\n s[d].compute_at(s[V], tt)\n s[VL].compute_at(s[V], tt)\n\n # batch gemm\n bna = cfg[\"tile_bna\"].val\n bnb = cfg[\"tile_bnb\"].val\n\n eps, nu, k, b = s[M].op.axis\n alpha = eps.dom.extent\n c = s[M].op.reduce_axis[0]\n yo, xo, yi, xi = s[M].tile(k, b, bna, bnb)\n c, c_unroll = cfg[\"c_unroll\"].apply(s, M, c)\n s[M].reorder(yo, xo, c, c_unroll, yi, xi)\n s[M].unroll(c_unroll)\n s[M].unroll(yi)\n s[M].vectorize(xi)\n z = s[M].fuse(eps, nu)\n tile_and_bind3d(s, M, z, yo, xo, 1, cfg[\"yt\"].val, 1)\n\n # inverse transform\n s[A].compute_inline()\n k, b, vh, vw = s[Y].op.axis\n r_a, r_b = s[Y].op.reduce_axis\n for axis in [vh, vw, r_a, r_b]:\n s[Y].unroll(axis)\n\n # schedule output and fusion\n if output.op not in s.outputs:\n s[output].compute_inline()\n output = s.outputs[0]\n\n n, co, h, w = s[output].op.axis\n m = alpha - 3 + 1\n h, w, hi, wi = s[output].tile(h, w, m, m)\n s[output].unroll(hi)\n s[output].unroll(wi)\n fused = s[output].fuse(n, co, h, w)\n bb, tt = cfg[\"tile_t2\"].apply(s, output, fused)\n s[output].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n s[Y].compute_at(s[output], tt)", "def main():\n op = help()\n for t in [\"bowtie2\", \"samtools\", \"bamToBed\"]:\n if not isTool(t):\n logger.error(\"%s not exits! Please install through conda.\" % t)\n return\n if not os.path.exists(op.fqd):\n logger.error(\"Input %s not exists! Return.\" % op.fqd)\n return\n if len(glob(op.ref + \"*.bt2\")) == 0:\n logger.error(\"Bowtie2 reference not exists for prefix of %s! 
Return.\" %\n op.ref)\n return\n if not os.path.exists(op.output):\n os.makedirs(op.output, exist_ok=True)\n else:\n fs = glob(os.path.join(op.output, \"*\"))\n if len(fs) > 0:\n logger.info(\n \"Target output directory %s is not empty, may over-write some files.\"\n % op.output)\n\n #mapping\n data = preFqs(op.fqd)\n if len(data) == 0:\n logger.error(\n \"No matched _R1.fastq.gz and _R2.fastq.gz in %s. Return.\" %\n (op.fqd))\n return\n ref = op.ref\n sams = Parallel(n_jobs=op.number,backend=\"multiprocessing\")(\n delayed(tracMapping)(sample, fqs, ref, op.output, cpus=op.cpu)\n for sample, fqs in data.items())\n sams = [sam for sam in sams if sam is not None]\n\n #sam to bam and bedpe\n cpus = op.number * op.cpu\n ncpus = int(min(len(sams), cpus / 2))\n bedpes = Parallel(n_jobs=ncpus,backend=\"multiprocessing\")(delayed(sam2bamBedpe)(sam) for sam in sams)\n\n #cLoops2 qc\n cmd = \"cLoops2 qc -f %s -o bedpeQc -p %s\" % (\",\".join(bedpes),\n min(len(bedpes), cpus))\n callSys([cmd], logger)\n\n #combine report\n mata = parseBowtielog()\n matb = pd.read_csv(\"bedpeQc_bedpeQc.txt\", index_col=0, sep=\"\\t\")\n matb.index = [i.split(\"_all\")[0] for i in matb.index]\n for c in matb.columns:\n mata[c] = matb[c]\n mata.to_csv(\"tracPre_summary.txt\", sep=\"\\t\")\n cmd = \"rm bedpeQc_bedpeQc.txt\"\n os.system(cmd)", "def draw_squares_in_squares():\n window = rg.TurtleWindow()\n\n square_turtle = rg.SimpleTurtle('turtle')\n square_turtle.pen = rg.Pen('yellow', 6)\n square_turtle.speed = 1000 # Fast\n\n\n size = 200\n delta = 20\n\n # Do the indented code 13 times. Each time draws a square.\n for _ in range(20):\n square_turtle.draw_square(size)\n\n # Move \"inside\" the previous square a bit.\n square_turtle.pen_up()\n point_inside = rg.Point(square_turtle.x_cor() + (delta // 2),\n square_turtle.y_cor() - (delta // 2))\n square_turtle.go_to(point_inside)\n square_turtle.pen_down()\n\n # Next square will be a bit smaller.\n size = size - 20\n\n\n square_turtle = rg.SimpleTurtle('turtle')\n square_turtle.pen = rg.Pen('red', 10)\n square_turtle.speed = 40 # Fast\n square_turtle.pen_up()\n square_turtle.backward(40)\n square_turtle.pen_down()\n\n for _ in range(20):\n square_turtle.draw_square(size)\n\n # Move \"inside\" the previous square a bit.\n square_turtle.pen_up()\n point_inside = rg.Point(square_turtle.x_cor() - delta,\n square_turtle.y_cor() - delta)\n square_turtle.go_to(point_inside)\n square_turtle.pen_down()\n\n # Next square will be a bit smaller.\n size = size - 1\n delta = delta - 1\n window.close_on_mouse_click()", "def join(self, batches: List[Batch], outpath: str) -> None:\n kwargs = self._pre_join(outpath)\n\n crawler = Crawler()\n print(\"Joining...\")\n for batch in crawler.do_batch(batches):\n self.join_function(*batch, **kwargs)\n\n self._post_join(**kwargs)", "def main():\n\tfor combo in combinations:\n\t\twords1 = collection.find_one({\"keyword\": combo[0]})[\"wordcounts\"]\n\t\twords2 = collection.find_one({\"keyword\": combo[1]})[\"wordcounts\"]\n\t\tRMS = calcRMS(words1, words2)\n\t\tdb.RMS.insert({\"keywords\": list(combo), \"RMS\": RMS})\n\t\tprint combo, \"has an RMS of\", RMS", "def mineSweeper():\n clear()\n size_x = ask_numbers(\"Syötä kentän leveys ruuduissa\\n->\", \"Syötä luku joka on suurempi kuin 0.\\n->\")\n size_y = ask_numbers(\"Syötä kentän korkeus ruuduissa\\n->\", \"Syötä luku joka on suurempi kuin 0.\\n->\")\n mineQnt = ask_numbers(\"Syötä miinojen lukumäärä\\n->\", \"Syötä luku joka on suurempi kuin 0.\\n->\") \n while mineQnt > (size_x * size_y - 
1):\n print(\"Syötä miinojen lukumäärä siten, että ne mahtuvat kentälle(Max {} kpl)\". format(size_x * size_y - 1))\n mineQnt = ask_numbers(\"Syötä miinojen lukumäärä\\n->\", \"Syötä luku joka on suurempi kuin 0.\\n->\") \n print(\"aloitetaan peli kentällä, jonka koko on {} x {}, \\nja laudalta löytyy piilotettuna {} miinaa\".format(size_x, size_y, mineQnt))\n duration, result, moves = drawingService.main(size_x, size_y, mineQnt)\n return duration, size_x, size_y, result, moves", "def _compile_ops(self, parameters, space_group):\n ke = self.wave_numbers[0]\n kw = self.wave_numbers[1]\n ki = self.wave_numbers[2:]\n mu = 1\n cavities = self.cavities\n ops = {}\n def add(i, j, op, key='default'):\n if (i, j) not in ops:\n ops[(i, j)] = {key: op}\n else:\n if key in ops[(i, j)]:\n raise ValueError(\"Duplicate key value provided in operator construction\")\n else:\n ops[(i, j)][key] = op\n\n # cavities\n for row, _ in enumerate(cavities):\n for col, _ in enumerate(cavities):\n if row == col:\n add(\n row, col,\n -1 * self.multitrace_operator(ki[row], mu, cavities[row], parameters=parameters, space_group=space_group)\n )\n add(\n row, col,\n -1 * self.multitrace_operator(kw, mu, cavities[row], parameters=parameters, space_group=space_group),\n key='wall'\n )\n else:\n add(\n row, col,\n -1 * self.multitrace_operator(kw, mu, cavities[col], target=cavities[row], parameters=parameters, space_group=space_group)\n ),\n # # self to wall\n add(\n row, col+1,\n self.multitrace_operator(kw, mu, self.main, target=cavities[row], parameters=parameters, space_group=space_group)\n )\n \n for col, cavity in enumerate(cavities):\n add(\n row+1, col,\n -1 * self.multitrace_operator(kw, mu, cavity, target=self.main, parameters=parameters, space_group=space_group)\n )\n \n # external boundary\n add(\n row+1, col+1,\n self.multitrace_operator(kw, mu, self.main, parameters=parameters, space_group=space_group),\n key='wall'\n\n )\n add(\n row+1, col+1,\n self.multitrace_operator(ke, mu, self.main, parameters=parameters, space_group=space_group),\n key='exterior'\n )\n # finished\n return ops", "def total_power_square(x, y, serial, size):\n result = 0\n for i in range(x, x + size):\n for j in range(y, y + size):\n result += cell_power(i, j, serial)\n return result", "def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"", "def test010_similarity(self, b_size=8, dim=1024,\n alpha_fwd=0.999, alpha_bkw=0.99, eps=1e-05, itrs=8):\n # instantiate inputs\n input = torch.randn(b_size, dim)\n input_0 = input.clone().detach().requires_grad_(True)\n input_1 = input.clone().detach().requires_grad_(True)\n # instantiate gradient at the output\n grad_out = torch.randn(b_size, dim)\n\n # instantiate Linearized Online Norm class\n onlin = OnlineNorm1D(dim, alpha_fwd=alpha_fwd, alpha_bkw=alpha_bkw,\n eps=eps, b_size=b_size)\n\n # instantiate Looping Online Norm class\n onloop = OnlineNorm1D(dim, eps=eps,\n ctrl_norm=ControlNorm1DLoop(dim,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n eps=eps))\n\n for _ in range(itrs):\n # fprop through Linearized Online Norm class\n y_0 = onlin(input_0)\n # bprop through Linearized Online Norm class\n y_0.backward(grad_out)\n # fprop through Looping Online Norm class\n y_1 = onloop(input_1)\n # bprop through Looping Online Norm class\n y_1.backward(grad_out)\n\n # numerically compare output\n np.testing.assert_allclose(y_0.detach().numpy(),\n y_1.detach().numpy(),\n rtol=1e-4, atol=1e-5)\n # numerically grad_in\n np.testing.assert_allclose(input_0.grad.detach().numpy(),\n 
input_1.grad.detach().numpy(),\n rtol=1e-4, atol=1e-5)\n\n self.logger.info('Algorithm implemented using linearization of ops '\n 'numerically matches algorithm implemented with '\n 'loops')", "def main():\r\n global matrix_x, matrix_y\r\n if inputs_valid():\r\n if number_of_images_b.get() != \"\": #check if images_b empty\r\n matrix_size = (int(number_of_images_a.get()) +\r\n int(number_of_images_b.get()))\r\n else:\r\n matrix_size = int(number_of_images_a.get())\r\n\r\n size_prime, matrix_x, matrix_y = square_distribution(matrix_size)\r\n\r\n if size_prime:\r\n messagebox.showwarning(\"Grid can not be constructed\", (\r\n \"Error: grid of requested size can not be\"\r\n \"constructed (type a + type b is prime)\"))\r\n else:\r\n generate_image_matrices()\r\n messagebox.showinfo(\"\",\"done.\")", "def main(targets):\n # Parse through the datasets and select only relevant columns\n cpu_df = data_exploration.parse_cpu_data(\"data/raw/hw_metric_histo.csv000\")\n sys_df = data_exploration.parse_sys_data(\"data/raw/system_sysinfo_unique_normalized.csv000\")\n\n # Create a new reference to the optimized DataFrame\n optimized_df = data_exploration.optimize_dataframe(cpu_df)\n\n # grab the specific column \"HW::CORE:C0:PERCENT\" as a feature\n cpu = data_exploration.get_stats(optimized_df, \"name\", \"HW::CORE:C0:PERCENT:\")\n\n # grab the specific column \"HW::CORE:TEMPERATURE:CENTIGRADE\" as a feature\n temp = data_exploration.get_stats(optimized_df, \"name\", \"HW::CORE:TEMPERATURE:CENTIGRADE:\")\n\n # grab the GUIDs from each dataset and put them into lists\n sys_guid = data_exploration.get_guid(sys_df, 'guid')\n hw_guid = data_exploration.get_guid(cpu_df, 'guid')\n\n # checking for the GUID overlap in both datasets\n syshw_overlap = [guid for guid in sys_guid if guid in hw_guid]\n\n # objective is to create a dataframe of only matching GUIDs\n hwcpu_match = data_exploration.get_cpu_guid(cpu, syshw_overlap)\n\n # only grabbing the relevant columns to be matched on\n hwtemp_match = data_exploration.get_temp_guid(temp, syshw_overlap)\n\n # instantiating our dataframes to be joined\n hwtemp = pd.DataFrame(hwtemp_match.groupby('guid')['temp_mean'].mean())\n hwcpu = pd.DataFrame(hwcpu_match.groupby('guid')['utilization_mean'].mean())\n\n # joining our matched dataframes together, only using relevant columns\n combined = sys_df.join(hwcpu, on=['guid'], how='left')\n combined = combined.join(hwtemp, on=['guid'], how='left')\n combined = combined.drop(columns=['guid', 'model_normalized', \"processornumber\"])\n\n # create copy of our joined dataframe to be used for modelling\n feature_columns = combined.copy()\n\n # selecting only relevant columns to use for features\n feature_columns = feature_columns[['os','cpu_family', 'cpuvendor',\n 'graphicscardclass', 'persona']]\n\n # creating a completely one-hot encoded dataframe only containing relevant columns\n dummy = pd.get_dummies(feature_columns)\n\n # converting our categorical variables to be predicted on into numerical values\n cleanup_nums = {'persona': {'Web User': 0, 'Casual User': 1, 'Gamer':2, 'Casual Gamer': 3,\n 'Office/Productivity':4, 'Content Creator/IT': 5,\n 'Communication': 6, 'Win Store App User': 7, 'Entertainment': 8,\n 'File & Network Sharer':9, 'Unknown': 10}}\n\n # replacing the values in the column 'persona' to be numerical\n encode_persona = combined['persona'].to_frame().replace(cleanup_nums)\n\n # putting our old means back into the dummy dataframe\n dummy['util_mean'] = combined['utilization_mean']\n dummy['temp_mean'] = 
combined['temp_mean']\n # dummy = dummy.drop(columns=['persona'])\n dummy['persona'] = encode_persona['persona']\n\n dummy = dummy.dropna()\n nona_test = dummy.copy()\n\n # we want to predict on Y\n Y = nona_test['persona']\n X = nona_test.drop(columns=['persona'])\n\n # creating our test/train split\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n # all the models we are going to use\n names = [\"Nearest_Neighbors\", \"Linear_SVM\", \"Polynomial_SVM\", \"RBF_SVM\", \"Gradient_Boosting\"]\n\n # all of our predictors scaled to the degree of our datasets\n classifiers = [KNeighborsClassifier(3),\n SVC(kernel=\"linear\", C=0.025),\n SVC(kernel=\"poly\", degree=3, C=0.025),\n SVC(kernel=\"rbf\", C=1, gamma=2),\n GradientBoostingClassifier(n_estimators=100, learning_rate=1.0)]\n\n scores = []\n # we write in our accuracy scores to [scores]\n for name, clf in zip(names, classifiers):\n clf.fit(X_train, Y_train)\n score = clf.score(X_test, Y_test)\n scores.append(score)\n\n show = data_exploration.get_model_scores(names, scores)\n model_scores = data_exploration.plot_graphical_model_scores(show)", "def main():\n \n m = int(input(\"length of wall:\"))\n n = int(input(\"width of wall:\"))\n a = int(input(\"length of tile:\"))\n b = int(input(\"width of tile:\")) \n \n if m*n % a*b != 0:\n error1 = \"Wall cannot be covered completely by given tile dimension.\" + \"\\n\" + \"Unable to perform calculation.\"\n print(error1)\n return error1\n elif m == n == a == b:\n error2 = \"The tile is the size of the wall. No need for any calculations.\"\n print(error2)\n return error2\n else:\n matrix = [0]*(m*n)\n wall_identity = wall(m,n)\n final = tile_combi(m,n,a,b,wall_identity,matrix,[],[])\n num = len(final)\n for p in range(num):\n print(final[p],\"\\n\")\n print(\"Total number of possible combination:{0}\".format(num))\n max_num = num - 1\n choice = int(turtle.numinput(\"How to tile the wall?\",\"Select a tiling method from 0 - \" + str(max_num), None, 0, max_num))\n method = final[choice]\n wall_draw(m,n,method,wall_identity)", "def _relief_square_inner_refresh(self, add_instruction: Callable,\n top_color: ColorRGB, bottom_color: ColorRGB,\n wid_x: float, wid_y: float, wid_width: float, wid_height: float):\n lines = int(self.relief_square_inner_lines)\n offset = int(self.relief_square_inner_offset)\n for line in range(1, lines + 1):\n alpha = 0.9 - (line / lines) * 0.81\n line += offset\n line2 = 2 * line\n\n in_x1 = wid_x + line\n in_x2 = in_x1 + wid_width - line2\n in_y1 = wid_y + line\n in_y2 = in_y1 + wid_height - line2\n\n add_instruction(Color(*top_color, alpha)) # inside top left\n add_instruction(Line(points=[in_x1, in_y1, in_x1, in_y2, in_x2, in_y2]))\n add_instruction(Color(*bottom_color, alpha)) # inside bottom right\n add_instruction(Line(points=[in_x1, in_y1, in_x2, in_y1, in_x2, in_y2]))", "def run_split_with_solids(self,mc):\n top, bot = self.outs\n feed = self.ins[0]\n top.copy_like(feed)\n bot.copy_like(top)\n top_mass = top.mass\n F_mass_solids = sum(top_mass*self.split)\n TS=1-mc\n F_mass_tot = F_mass_solids/TS\n F_mass_wat = F_mass_tot - F_mass_solids\n top_mass[:] *= self.split\n top.imass['Water']=F_mass_wat\n bot.mass[:] -= top_mass", "def hjoin(first_sygroup, second_sygroup):\n visitor = HJoinVisitor(first_sygroup)\n second_sygroup.visit(visitor)", "def test_grad_square_matrix(func, motion, optimized, preserve_result, sqm):\n utils.test_reverse_array(func, motion, optimized, preserve_result, sqm)", "def test_scrunchlcs(self):\n a_in = 
os.path.join(self.datadir,\n 'monol_testA_E3-50_lc' + HEN_FILE_EXTENSION)\n b_in = os.path.join(self.datadir,\n 'monol_testB_E3-50_lc' + HEN_FILE_EXTENSION)\n out = os.path.join(self.datadir,\n 'monol_test_scrunchlc' + HEN_FILE_EXTENSION)\n command = '{0} {1} -o {2}'.format(a_in, b_in, out)\n\n hen.lcurve.scrunch_main(command.split())\n a_lc = hen.io.load_lcurve(a_in)\n b_lc = hen.io.load_lcurve(b_in)\n out_lc = hen.io.load_lcurve(out)\n assert np.all(out_lc.counts == a_lc.counts + b_lc.counts)\n gti_to_test = hen.io.load_events(self.first_event_file).gti\n assert np.allclose(gti_to_test, out_lc.gti)", "def affectedSquaresFor(square):\n return set(boxFor(square)).union(colFor(square)).union(rowFor(square))", "def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)", "def test_broadcastable_flags_all_broadcastable_on_joinaxis(self):\r\n rng = numpy.random.RandomState(seed=utt.fetch_seed())\r\n a_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n b_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n\r\n a = self.shared(a_val, broadcastable=(True, False, True))\r\n b = self.shared(b_val, broadcastable=(True, False, True))\r\n c = self.join_op()(0, a, b)\r\n assert not c.type.broadcastable[0]\r\n\r\n f = function([], c, mode=self.mode)\r\n topo = f.maker.fgraph.toposort()\r\n assert [True for node in topo if isinstance(node.op, self.join_op)]\r\n\r\n f()\r\n utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng)", "def join_scaffolds(first_end, new_scaffold, new_end, links_dict, scaffold_list, used_scaffs): \n while new_end not in used_scaffs:\n\n if new_end in links_dict and len(links_dict[new_end]) > 0:\n next_scaff_start = links_dict[new_end]\n if next_scaff_start != other_side(first_end):\n ns = next_scaff_start.split(\"_\")\n next_scaff_number = int(ns[1])\n next_scaff_dir = ns[0]\n next_scaffold = scaffold_list[next_scaff_number]\n if next_scaff_dir == \"right\":\n next_scaffold.reverse()\n if new_scaffold[-2] != other_end(next_scaffold[1]):\n new_scaffold = new_scaffold[:-1] + [other_end(new_scaffold[-2])] + [other_end(next_scaffold[1])] + next_scaffold[1:]\n else:\n new_scaffold = new_scaffold[:-1] + next_scaffold[1:]\n used_scaffs.append(new_end)\n used_scaffs.append(next_scaff_start)\n new_end = other_side(next_scaff_start)\n else: \n new_scaffold[-1] = \"join_circle\"\n used_scaffs.append(new_end)\n \n else:\n new_scaffold[-1] = \"join_not_found\"\n used_scaffs.append(new_end)\n\n return new_scaffold", "def __split0_q_loop(\n col: int,\n r_tiles: SquareDiagTiles,\n proc_tile_start: torch.Tensor,\n active_procs: torch.Tensor,\n q0_tiles: SquareDiagTiles,\n q_dict: Dict,\n q_dict_waits: Dict,\n) -> None:\n tile_columns = r_tiles.tile_columns\n diag_process = (\n torch.nonzero(input=proc_tile_start > col, as_tuple=False)[0]\n if col != tile_columns\n else proc_tile_start[-1]\n )\n diag_process = diag_process.item()\n rank = r_tiles.arr.comm.rank\n q0_dtype = q0_tiles.arr.dtype\n q0_torch_type = q0_dtype.torch_type()\n q0_torch_device = q0_tiles.arr.device.torch_device\n # wait for Q tensors sent during the R calculation -----------------------------------------\n if col in q_dict_waits.keys():\n for key in q_dict_waits[col].keys():\n new_key = q_dict_waits[col][key][3] + key + \"e\"\n q_dict_waits[col][key][0][1].Wait()\n q_dict[col][new_key] = [\n q_dict_waits[col][key][0][0],\n q_dict_waits[col][key][1].wait(),\n q_dict_waits[col][key][2].wait(),\n ]\n del q_dict_waits[col]\n # local Q calculation 
=====================================================================\n if col in q_dict.keys():\n lcl_col_shape = r_tiles.local_get(key=(slice(None), col)).shape\n # get the start and stop of all local tiles\n # -> get the rows_per_process[rank] and the row_indices\n row_ind = r_tiles.row_indices\n prev_rows_per_pr = sum(r_tiles.tile_rows_per_process[:rank])\n rows_per_pr = r_tiles.tile_rows_per_process[rank]\n if rows_per_pr == 1:\n # if there is only one tile on the process: return q_dict[col]['0']\n base_q = q_dict[col][\"l0\"][0].clone()\n del q_dict[col][\"l0\"]\n else:\n # 0. get the offset of the column start\n offset = (\n torch.tensor(\n row_ind[col].item() - row_ind[prev_rows_per_pr].item(), device=q0_torch_device\n )\n if row_ind[col].item() > row_ind[prev_rows_per_pr].item()\n else torch.tensor(0, device=q0_torch_device)\n )\n # 1: create an eye matrix of the row's zero'th dim^2\n q_lcl = q_dict[col][\"l0\"] # [0] -> q, [1] -> shape of a use in q calc (q is square)\n del q_dict[col][\"l0\"]\n base_q = torch.eye(\n lcl_col_shape[r_tiles.arr.split], dtype=q_lcl[0].dtype, device=q0_torch_device\n )\n # 2: set the area of the eye as Q\n base_q[offset : offset + q_lcl[1][0], offset : offset + q_lcl[1][0]] = q_lcl[0]\n\n local_merge_q = {rank: [base_q, None]}\n else:\n local_merge_q = {}\n # -------------- send local Q to all -------------------------------------------------------\n for pr in range(diag_process, active_procs[-1] + 1):\n if pr != rank:\n hld = torch.zeros(\n [q0_tiles.lshape_map[pr][q0_tiles.arr.split]] * 2,\n dtype=q0_torch_type,\n device=q0_torch_device,\n )\n else:\n hld = local_merge_q[pr][0].clone()\n wait = q0_tiles.arr.comm.Ibcast(hld, root=pr)\n local_merge_q[pr] = [hld, wait]\n\n # recv local Q + apply local Q to Q0\n for pr in range(diag_process, active_procs[-1] + 1):\n if local_merge_q[pr][1] is not None:\n # receive q from the other processes\n local_merge_q[pr][1].Wait()\n if rank in active_procs:\n sum_row = sum(q0_tiles.tile_rows_per_process[:pr])\n end_row = q0_tiles.tile_rows_per_process[pr] + sum_row\n # slice of q_tiles -> [0: -> end local, 1: start -> stop]\n q_rest_loc = q0_tiles.local_get(key=(slice(None), slice(sum_row, end_row)))\n # apply the local merge to q0 then update q0`\n q_rest_loc = q_rest_loc @ local_merge_q[pr][0]\n q0_tiles.local_set(key=(slice(None), slice(sum_row, end_row)), value=q_rest_loc)\n del local_merge_q[pr]\n\n # global Q calculation =====================================================================\n # split up the Q's from the global QR calculation and set them in a dict w/ proper keys\n global_merge_dict = (\n __split0_global_q_dict_set(\n q_dict_col=q_dict[col], col=col, r_tiles=r_tiles, q_tiles=q0_tiles\n )\n if rank == diag_process\n else {}\n )\n\n if rank == diag_process:\n merge_dict_keys = set(global_merge_dict.keys())\n else:\n merge_dict_keys = None\n merge_dict_keys = r_tiles.arr.comm.bcast(merge_dict_keys, root=diag_process)\n\n # send the global merge dictionary to all processes\n for k in merge_dict_keys:\n if rank == diag_process:\n snd = global_merge_dict[k].clone()\n snd_shape = snd.shape\n r_tiles.arr.comm.bcast(snd_shape, root=diag_process)\n else:\n snd_shape = None\n snd_shape = r_tiles.arr.comm.bcast(snd_shape, root=diag_process)\n snd = torch.empty(snd_shape, dtype=q0_dtype.torch_type(), device=q0_torch_device)\n\n wait = r_tiles.arr.comm.Ibcast(snd, root=diag_process)\n global_merge_dict[k] = [snd, wait]\n if rank in active_procs:\n # create a dictionary which says what tiles are in each 
column of the global merge Q\n qi_mult = {}\n for c in range(q0_tiles.tile_columns):\n # this loop is to slice the merge_dict keys along each column + create the\n qi_mult_set = set([(i, c) for i in range(col, q0_tiles.tile_columns)])\n if len(qi_mult_set & merge_dict_keys) != 0:\n qi_mult[c] = list(qi_mult_set & merge_dict_keys)\n\n # have all the q_merge in one place, now just do the mm with q0\n # get all the keys which are in a column (qi_mult[column])\n row_inds = q0_tiles.row_indices + [q0_tiles.arr.gshape[0]]\n q_copy = q0_tiles.arr.larray.clone()\n for qi_col in qi_mult.keys():\n # multiply q0 rows with qi cols\n # the result of this will take the place of the row height and the column width\n out_sz = q0_tiles.local_get(key=(slice(None), qi_col)).shape\n mult_qi_col = torch.zeros(\n (q_copy.shape[1], out_sz[1]), dtype=q0_dtype.torch_type(), device=q0_torch_device\n )\n for ind in qi_mult[qi_col]:\n if global_merge_dict[ind][1] is not None:\n global_merge_dict[ind][1].Wait()\n lp_q = global_merge_dict[ind][0]\n if mult_qi_col.shape[1] < lp_q.shape[1]:\n new_mult = torch.zeros(\n (mult_qi_col.shape[0], lp_q.shape[1]),\n dtype=mult_qi_col.dtype,\n device=q0_torch_device,\n )\n new_mult[:, : mult_qi_col.shape[1]] += mult_qi_col.clone()\n mult_qi_col = new_mult\n\n mult_qi_col[\n row_inds[ind[0]] : row_inds[ind[0]] + lp_q.shape[0], : lp_q.shape[1]\n ] = lp_q\n hold = torch.matmul(q_copy, mult_qi_col)\n\n write_inds = q0_tiles.get_start_stop(key=(0, qi_col))\n q0_tiles.arr.lloc[:, write_inds[2] : write_inds[2] + hold.shape[1]] = hold\n else:\n for ind in merge_dict_keys:\n global_merge_dict[ind][1].Wait()\n if col in q_dict.keys():\n del q_dict[col]", "def ggpl_house():\n\n\t# .lines ogni riga ha due coppie di x/y che costituiscono un segmento\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/muri_esterni.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\texternalWalls = MKPOL([verts,cells,None])\n\tfloor = SOLIDIFY(externalWalls)\n\tfloor = S([1,2,3])([.04,.04,.04])(floor)\n\texternalWalls = S([1,2,3])([.04,.04,.04])(externalWalls)\n\texternalWalls = OFFSET([.2,.2,4])(externalWalls)\n\theightWalls = SIZE([3])(externalWalls)[0]\n\tthicknessWalls = SIZE([2])(externalWalls)[0]\n\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/muri_interni.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\tinternalWalls = MKPOL([verts,cells,None])\n\tinternalWalls = S([1,2,3])([.04,.04,.04])(internalWalls)\n\tinternalWalls = OFFSET([.2,.2,4])(internalWalls)\n\twalls = STRUCT([externalWalls, internalWalls])\n\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/porte.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\tdoors = MKPOL([verts,cells,None])\n\tdoors = SOLIDIFY(doors)\n\tdoors = S([1,2,3])([.04,.04,.04])(doors)\n\tdoors = OFFSET([.2,.2,3])(doors)\n\twalls = DIFFERENCE([walls, doors])\n\n\n\tverts = []\n\tcells = []\n\ti = 0\n\treader = csv.reader(open(\"lines/finestre.lines\", 'rb'), delimiter=',') \n\tfor row in reader:\n\t\tverts.append([float(row[0]), float(row[1])])\n\t\tverts.append([float(row[2]), 
float(row[3])])\n\t\ti+=2\n\t\tcells.append([i-1,i])\n\n\twindows = MKPOL([verts,cells,None])\n\twindows = SOLIDIFY(windows)\n\twindows = S([1,2,3])([.04,.04,.04])(windows)\n\twindows = OFFSET([.2,.2,2])(windows)\n\theightWindows = SIZE([3])(windows)[0]\n\twindows = T(3)((heightWalls-heightWindows)/2.)(windows)\n\twalls = DIFFERENCE([walls, windows])\n\n\tfloor = TEXTURE(\"texture/floor.jpg\")(floor)\n\twalls = TEXTURE(\"texture/wall.jpg\")(walls)\n\thome = STRUCT([floor, walls])\n\treturn home", "def cat_splits_lin_full(cat,cols=None,mask=None,p=None):\n\n if p is not None:\n jobs=[]\n p=multiprocessing.Pool(processes=config.cfg.get('proc',32),maxtasksperchild=config.cfg.get('task',None))\n\n mask=catalog.CatalogMethods.check_mask(cat.coadd,mask)\n if cols is None:\n cols=catalog.CatalogMethods.get_cat_colnames(cat)\n\n txt.write_methods.heading('Linear Splits Full',cat,label='linear_splits_full',create=True)\n\n for val in cols:\n for val2 in cols:\n if (val==val2)|(val in ['e1','e2'])|(val2 in ['e1','e2']):\n continue\n\n array=getattr(cat,val)\n array2=getattr(cat,val2)\n name=fig.plot_methods.get_filename_str(cat)\n\n if p is not None:\n job=p.apply_async(split_gals_lin_along_base,[[cat.cat,cat.bs,cat.wt,cat.e1,cat.e2,cat.m1,cat.m2,cat.c1,cat.c2,cat.w],val,array,mask,name],{'log':config.log_val.get(val,False),'log2':config.log_val.get(val2,False),'plot':True,'e':False,'val2':val2,'array2':array2})\n jobs.append(job)\n else:\n tmp,tmp,arr1,arr1err,e1,e2,e1err,e2err,m1,m2,b1,b2,m1err,m2err,b1err,b2err=split_gals_lin_along_base([cat.cat,cat.bs,cat.wt,cat.e1,cat.e2,cat.m1,cat.m2,cat.c1,cat.c2,cat.w],val,array,mask,name,log=config.log_val.get(val,False),log2=config.log_val.get(val2,False),plot=True,e=False,val2=val2,array2=array2)\n txt.write_methods.heading(val+' '+val2,cat,label='linear_splits',create=False)\n # txt.write_methods.write_append(x+' '+str(arr1)+' '+str(arr1err),cat,label='linear_splits',create=False)\n # txt.write_methods.write_append('e '+str(e1)+' '+str(e2),cat,label='linear_splits',create=False)\n # txt.write_methods.write_append('e err '+str(e1err)+' '+str(e2err),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('slope '+str(m1)+' '+str(m2),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('slope err '+str(m1err)+' '+str(m2err),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('intercept '+str(b1)+' '+str(b2),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('intercept err '+str(b1err)+' '+str(b2err),cat,label='linear_splits',create=False)\n\n if p is not None:\n for job in jobs:\n val,val2,arr1,arr1err,e1,e2,e1err,e2err,m1,m2,b1,b2,m1err,m2err,b1err,b2err=job.get()\n txt.write_methods.heading(val+' '+val2,cat,label='linear_splits',create=False)\n # txt.write_methods.write_append(x+' '+str(arr1)+' '+str(arr1err),cat,label='linear_splits',create=False)\n # txt.write_methods.write_append('e '+str(e1)+' '+str(e2),cat,label='linear_splits',create=False)\n # txt.write_methods.write_append('e err '+str(e1err)+' '+str(e2err),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('slope '+str(m1)+' '+str(m2),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('slope err '+str(m1err)+' '+str(m2err),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('intercept '+str(b1)+' '+str(b2),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('intercept err '+str(b1err)+' 
'+str(b2err),cat,label='linear_splits',create=False)\n\n p.close()\n p.join()\n \n return", "def test_assembly_inner_product_2_forms(self):\n func_space_lob = FunctionSpace(self.mesh, '2-lobatto', self.p)\n func_space_gauss = FunctionSpace(self.mesh, '2-gauss', self.p)\n func_space_extgauss = FunctionSpace(self.mesh, '2-ext_gauss', self.p)\n\n basis_lob = BasisForm(func_space_lob)\n basis_lob.quad_grid = 'gauss'\n M_lob = inner(basis_lob, basis_lob)\n\n basis_gauss = BasisForm(func_space_gauss)\n basis_gauss.quad_grid = 'lobatto'\n M_gauss = inner(basis_gauss, basis_gauss)\n\n basis_ext_gauss = BasisForm(func_space_extgauss)\n print(basis_ext_gauss.num_basis)\n basis_ext_gauss.quad_grid = 'lobatto'\n M_extgauss = inner(basis_ext_gauss, basis_ext_gauss)\n\n M_lob_ass_ref = assemble_slow(self.mesh, M_lob, func_space_lob.dof_map.dof_map,\n func_space_lob.dof_map.dof_map)\n M_gauss_ass_ref = assemble_slow(self.mesh, M_gauss, func_space_gauss.dof_map.dof_map,\n func_space_gauss.dof_map.dof_map)\n M_extgauss_ass_ref = assemble_slow(\n self.mesh, M_extgauss, func_space_extgauss.dof_map.dof_map_internal, func_space_extgauss.dof_map.dof_map_internal)\n\n M_lob_ass = assemble(M_lob, func_space_lob, func_space_lob).toarray()\n M_gauss_ass = assemble(M_gauss, func_space_gauss, func_space_gauss).toarray()\n M_extgauss_ass = assemble(M_extgauss, func_space_extgauss,\n func_space_extgauss).toarray()\n\n npt.assert_array_almost_equal(M_lob_ass_ref, M_lob_ass)\n npt.assert_array_almost_equal(M_gauss_ass_ref, M_gauss_ass)\n npt.assert_array_almost_equal(M_extgauss_ass_ref, M_extgauss_ass)", "def compute_md5(self, table1, table2):\n logging.info(\n f\"\"\"compute_md5 {table1.tableName} in 2 threads ora_Hash & pg_Hash\"\"\")\n\n \"\"\"\n get the queries to be executed\n \"\"\"\n qry1 = self.get_queries(table1,1)\n qry2 = self.get_queries(table2,2)\n\n err = 0\n maxnumrows1 = 0\n result_md5 = self.ResultMd5('',0)\n\n if qry1 is None:\n err = 1\n \"\"\"\n there is qry to execute\n \"\"\"\n while (qry1 is not None):\n\n if table1.numrows == table2.numrows:\n\n \"\"\"\n create the 2 threads objects to execute qry\n \"\"\"\n qry_thread_1 = ExecQry(\n table1.getengine() + '_Hash',table1,qry1.sqltext)\n qry_thread_2 = ExecQry(\n table2.getengine() + '_Hash',table2,qry2.sqltext)\n\n # logging.debug(\"thread1 = \" + qry_thread_1.name + \"qry = \" + qry1.\n # sqltext)\n # logging.debug(\"thread2 = \" + qry_thread_2.name + \"qry = \" + qry2.\n # sqltext)\n\n self.set_status(qry1.id,'running',1)\n self.set_status(qry2.id,'running',2)\n\n self.set_qry(qry1.id,qry1.sqltext,1)\n self.set_qry(qry2.id,qry2.sqltext,2)\n\n \"\"\"\n start the threads on server1 and server2\n \"\"\"\n try:\n qry_thread_1.start()\n except Exception:\n logging.error(\"thread error\")\n break\n try:\n qry_thread_2.start()\n except Exception:\n logging.error(\"thread error\")\n break\n\n \"\"\"\n wait for the 2 thread to terminate\n \"\"\"\n r1 = qry_thread_1.join()\n r2 = qry_thread_2.join()\n ret1 = self.ResultMd5('',0)\n ret2 = self.ResultMd5('',0)\n ret1.result = r1[0][0]\n ret1.numrows = r1[0][1]\n ret2.result = r2[0][0]\n ret2.numrows = r2[0][1]\n else:\n ret1 = self.ResultMd5(\n table1.getengine() + ' nbrows<>' + str(table1.numrows),table1.numrows)\n ret2 = self.ResultMd5(\n table2.getengine() + ' nbrows<>' + str(table2.numrows),table2.numrows)\n err = 1\n\n \"\"\"\n Fill some flag values to represent the status and result\n \"\"\"\n vhash1 = None\n hash1 = ''\n vhash2 = None\n hash2 = ''\n numrows1 = 0\n result = 'nok'\n\n if ret1 is not 
None:\n vhash1 = ret1\n hash1 = ret1.result\n numrows1 = ret1.numrows\n\n if ret2 is not None:\n vhash2 = ret2\n hash2 = ret2.result\n\n if numrows1 > maxnumrows1:\n maxnumrows1 = numrows1\n\n self.set_hash(qry1.id,vhash1,1)\n self.set_hash(qry2.id,vhash2,2)\n\n \"\"\"\n set the result of this partition\n \"\"\"\n\n if ((hash1 != '') and (hash2 != '')):\n if (hash1 == hash2) and (hash1 != 'norows'):\n result = 'ok'\n else:\n err = err + 1\n\n self.set_result(qry1.id,result)\n\n \"\"\"\n tell that this partition has been processed\n \"\"\"\n self.set_status(qry1.id,'done',1)\n self.set_status(qry2.id,'done',2)\n\n \"\"\"\n get the next query for step 1\n \"\"\"\n qry1 = self.get_queries(table1,1)\n qry2 = self.get_queries(table2,2)\n\n if err > 0:\n result = 'nok'\n result_md5 = self.ResultMd5(result,maxnumrows1)\n\n return result_md5", "def sjoin(left_df, right_df, how=..., op=..., lsuffix=..., rsuffix=...):\n ...", "def _relief_square_outer_refresh(self, add_instruction: Callable,\n top_color: ColorRGB, bottom_color: ColorRGB,\n wid_x: float, wid_y: float, wid_width: float, wid_height: float):\n lines = int(self.relief_square_outer_lines)\n for line in range(1, lines + 1):\n alpha = 0.9 - (line / lines) * 0.81\n line2 = 2 * line\n\n out_x1 = wid_x - line\n out_x2 = out_x1 + wid_width + line2\n out_y1 = wid_y - line\n out_y2 = out_y1 + wid_height + line2\n\n add_instruction(Color(*top_color, alpha)) # outside upper left\n add_instruction(Line(points=[out_x1, out_y1, out_x1, out_y2, out_x2, out_y2]))\n add_instruction(Color(*bottom_color, alpha)) # outside bottom right\n add_instruction(Line(points=[out_x1, out_y1, out_x2, out_y1, out_x2, out_y2]))", "def test_merge_two_two_same():\n run_merge([1, 3], [1, 3], [1, 1, 3, 3])", "def compare_sub_and_full(\n mc, reverse_graph, a_indices, b_indices, num_sampled, max_depth=1, num_iters=1\n):\n print(\"Comparing the subsample to the full method.\")\n\n # TEMP going to override the full\n # found_error = False\n # j = 5\n # while (not found_error) and (j < 25):\n # mc, reverse_graph, to_write, args_dict = gen_random_matrix_(\n # [j, j], {}, (0.2, 0.1, 0.1, 0.1)\n # )\n # num_sampled = [j // 4, j // 4]\n # a_indices = np.sort(\n # np.random.choice(np.arange(j), size=j // 2, replace=False)\n # ).astype(int)\n # b_indices = np.sort(\n # np.random.choice(np.arange(j), size=j // 2, replace=False)\n # ).astype(int)\n\n sub_mc, args_dict = mc.compute_probe_stats(a_indices, b_indices)\n sub_mc.create_connections()\n reverse_sub = reverse(sub_mc.graph)\n\n flat_indices_a = np.arange(len(a_indices))\n flat_indices_b = np.arange(len(b_indices))\n\n def random_var_gen(iter_val):\n start_idx = np.random.choice(flat_indices_a, size=num_sampled[0], replace=False)\n end_idx = np.random.choice(flat_indices_b, size=num_sampled[1], replace=False)\n\n start = np.array(a_indices)[start_idx]\n end = np.array(b_indices)[end_idx]\n end = end + mc.num_a\n end_idx = end_idx + sub_mc.num_a\n\n return start_idx, end_idx, start, end\n\n def fn_to_eval(start, end):\n return find_connected_limited(mc.graph, start, end, max_depth, reverse_graph)\n\n def sub_fn_to_eval(start, end):\n return find_connected_limited(sub_mc.graph, start, end, max_depth, reverse_sub)\n\n full_results = []\n for i in range(num_iters):\n start_idx, end_idx, start, end = random_var_gen(i)\n big_res = fn_to_eval(start, end)\n small_res = sub_fn_to_eval(start_idx, end_idx)\n res = dict(\n big=big_res,\n small=small_res,\n start_idx=start_idx,\n end_idx=end_idx,\n start=start,\n end=end,\n )\n small_res_mod = 
(\n np.array(b_indices)[np.array(small_res).astype(int) - sub_mc.num_a]\n + mc.num_a\n )\n if not np.all(small_res_mod == np.array(big_res)):\n res[\"mod\"] = small_res_mod\n r1 = nx_find_connected_limited(\n nx_create_graph(mc.graph),\n start,\n end,\n max_depth,\n )\n r2 = nx_find_connected_limited(\n nx_create_graph(sub_mc.graph),\n start_idx,\n end_idx,\n max_depth,\n )\n res[\"nx\"] = dict(big=r1, small=r2)\n print(res)\n found_error = True\n print(a_indices)\n print(b_indices)\n print(mc.graph)\n print(sub_mc.graph)\n\n f1 = vis_graph(mc.graph, [mc.num_a, mc.num_b], start, end, reachable=r1)\n f2 = vis_graph(\n sub_mc.graph,\n [sub_mc.num_a, sub_mc.num_b],\n start_idx,\n end_idx,\n reachable=r2,\n )\n f1.savefig(os.path.join(here, \"..\", \"figures\", \"big_graph_debug.png\"))\n f2.savefig(os.path.join(here, \"..\", \"figures\", \"small_graph_debug.png\"))\n break\n\n full_results.append(res)\n\n # j = j + 1\n return full_results", "def __split0_r_calc(\n r_tiles: SquareDiagTiles,\n q_dict: Dict,\n q_dict_waits: Dict,\n col_num: int,\n diag_pr: int,\n not_completed_prs: torch.Tensor,\n) -> None:\n tile_rows_proc = r_tiles.tile_rows_per_process\n comm = r_tiles.arr.comm\n rank = comm.rank\n lcl_tile_row = 0 if rank != diag_pr else col_num - sum(tile_rows_proc[:rank])\n # only work on the processes which have not computed the final result\n q_dict[col_num] = {}\n q_dict_waits[col_num] = {}\n\n # --------------- local QR calc -----------------------------------------------------\n base_tile = r_tiles.local_get(key=(slice(lcl_tile_row, None), col_num))\n try:\n q1, r1 = torch.linalg.qr(base_tile, mode=\"complete\")\n except AttributeError:\n q1, r1 = base_tile.qr(some=False)\n\n q_dict[col_num][\"l0\"] = [q1, base_tile.shape]\n r_tiles.local_set(key=(slice(lcl_tile_row, None), col_num), value=r1)\n if col_num != r_tiles.tile_columns - 1:\n base_rest = r_tiles.local_get((slice(lcl_tile_row, None), slice(col_num + 1, None)))\n loc_rest = torch.matmul(q1.T, base_rest)\n r_tiles.local_set(key=(slice(lcl_tile_row, None), slice(col_num + 1, None)), value=loc_rest)\n # --------------- global QR calc (binary merge) -------------------------------------\n rem1 = None\n rem2 = None\n offset = not_completed_prs[0]\n loop_size_remaining = not_completed_prs.clone()\n completed = bool(loop_size_remaining.size()[0] <= 1)\n procs_remaining = loop_size_remaining.size()[0]\n loop = 0\n while not completed:\n if procs_remaining % 2 == 1:\n # if the number of processes active is odd need to save the remainders\n if rem1 is None:\n rem1 = loop_size_remaining[-1]\n loop_size_remaining = loop_size_remaining[:-1]\n elif rem2 is None:\n rem2 = loop_size_remaining[-1]\n loop_size_remaining = loop_size_remaining[:-1]\n if rank not in loop_size_remaining and rank not in [rem1, rem2]:\n break # if the rank is done then exit the loop\n # send the data to the corresponding processes\n half_prs_rem = torch.div(procs_remaining, 2, rounding_mode=\"floor\")\n\n zipped = zip(\n loop_size_remaining.flatten()[:half_prs_rem],\n loop_size_remaining.flatten()[half_prs_rem:],\n )\n for pr in zipped:\n pr0, pr1 = int(pr[0].item()), int(pr[1].item())\n __split0_merge_tile_rows(\n pr0=pr0,\n pr1=pr1,\n column=col_num,\n rank=rank,\n r_tiles=r_tiles,\n diag_process=diag_pr,\n key=str(loop) + \"p0\" + str(pr0) + \"p1\" + str(pr1) + \"e\",\n q_dict=q_dict,\n )\n\n __split0_send_q_to_diag_pr(\n col=col_num,\n pr0=pr0,\n pr1=pr1,\n diag_process=diag_pr,\n comm=comm,\n q_dict=q_dict,\n key=str(loop) + \"p0\" + str(pr0) + \"p1\" + 
str(pr1) + \"e\",\n q_dict_waits=q_dict_waits,\n q_dtype=r_tiles.arr.dtype.torch_type(),\n q_device=r_tiles.arr.larray.device,\n )\n\n loop_size_remaining = loop_size_remaining[: -1 * (half_prs_rem)]\n procs_remaining = loop_size_remaining.size()[0]\n\n if rem1 is not None and rem2 is not None:\n # combine rem1 and rem2 in the same way as the other nodes,\n # then save the results in rem1 to be used later\n __split0_merge_tile_rows(\n pr0=rem2,\n pr1=rem1,\n column=col_num,\n rank=rank,\n r_tiles=r_tiles,\n diag_process=diag_pr,\n key=str(loop) + \"p0\" + str(int(rem1)) + \"p1\" + str(int(rem2)) + \"e\",\n q_dict=q_dict if q_dict is not None else {},\n )\n\n rem1, rem2 = int(rem1), int(rem2)\n __split0_send_q_to_diag_pr(\n col=col_num,\n pr0=rem2,\n pr1=rem1,\n diag_process=diag_pr,\n key=str(loop) + \"p0\" + str(int(rem1)) + \"p1\" + str(int(rem2)) + \"e\",\n q_dict=q_dict if q_dict is not None else {},\n comm=comm,\n q_dict_waits=q_dict_waits,\n q_dtype=r_tiles.arr.dtype.torch_type(),\n q_device=r_tiles.arr.larray.device,\n )\n rem1 = rem2\n rem2 = None\n\n loop += 1\n if rem1 is not None and rem2 is None and procs_remaining == 1:\n # combine rem1 with process 0 (offset) and set completed to True\n # this should be the last thing that happens\n __split0_merge_tile_rows(\n pr0=offset,\n pr1=rem1,\n column=col_num,\n rank=rank,\n r_tiles=r_tiles,\n diag_process=diag_pr,\n key=str(loop) + \"p0\" + str(int(offset)) + \"p1\" + str(int(rem1)) + \"e\",\n q_dict=q_dict,\n )\n\n offset, rem1 = int(offset), int(rem1)\n __split0_send_q_to_diag_pr(\n col=col_num,\n pr0=offset,\n pr1=rem1,\n diag_process=diag_pr,\n key=str(loop) + \"p0\" + str(int(offset)) + \"p1\" + str(int(rem1)) + \"e\",\n q_dict=q_dict,\n comm=comm,\n q_dict_waits=q_dict_waits,\n q_dtype=r_tiles.arr.dtype.torch_type(),\n q_device=r_tiles.arr.larray.device,\n )\n rem1 = None\n\n completed = True if procs_remaining == 1 and rem1 is None and rem2 is None else False", "def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)", "def main_execute(vars):\n\n # Unpack necessary variables\n # output_directory is the root output folder for the run\n output_directory = vars[\"output_directory\"]\n\n # This will run operations which will:\n # 1) generate new ligands\n # 2) optionally filter ligands\n # 3) optionally convert from 1D smiles to 3D (mol2/PDB)\n\n sys.stdout.flush()\n\n\n smile_file_new_gen, new_gen_ligands_list = operations.populate_generation(vars)\n sys.stdout.flush()\n\n if new_gen_ligands_list is None:\n raise ValueError(\"Population failed to make enough mutants... 
\\\n Errors could include not enough diversity, too few seeds to the generation, \\\n number_of_mutants is too high, \\\n or all of the seed lack functional groups for performing reactions.\")\n\n sys.stdout.flush()", "def main(self):\n lines=open(self.expttxt,'r').readlines()\n if self.parall:\n pool=Pool(self.prod)\n pool.map(self.mutatraj,lines)\n pool.close()\n pool.join()\n else:\n for line in lines:\n self.mutatraj(line)", "def test_2():\n \n # Functions wrapped by agents\n def f(in_streams, out_streams):\n multiply_and_add(in_streams[0], out_streams[0],\n multiplicand=2, addend=1)\n\n def g(in_streams, out_streams):\n filter_then_square(in_streams[0], out_streams[0],\n filter_threshold=20)\n\n def h(in_streams, out_streams):\n s = Stream('s')\n sum_window(in_streams[0], s, window_size=3, step_size=3)\n print_stream(s, name=s.name)\n \n\n # Specify processes and connections.\n processes = \\\n {\n 'source_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n },\n },\n 'actuators': {}\n },\n 'filter_and_square_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('filtered', 'i')],\n 'compute_func': g,\n 'sources': {},\n 'actuators': {}\n },\n 'aggregate_and_output_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': h,\n 'sources': {},\n 'actuators': {}\n }\n }\n \n connections = \\\n {\n 'source_process' :\n {\n 'out' : [('filter_and_square_process', 'in')],\n 'acceleration' : [('source_process', 'in')]\n },\n 'filter_and_square_process' :\n {\n 'filtered' : [('aggregate_and_output_process', 'in')],\n },\n 'aggregate_and_output_process':\n {}\n }\n\n multicore(processes, connections)", "def run(self):\n first_index, last_index = \\\n self.get_initial_blocks()\n while last_index - first_index > self.block_size:\n first_index, last_index = \\\n self.join_blocks(first_index, last_index)\n self.merge_blocks(self.output_file_name, first_index, last_index)", "def FindCouplings():\n l1v = np.linspace(l1min, l1max, num=48)\n l2v = np.logspace(np.log10(l2min), np.log10(l2max), num=48)\n l3v = np.linspace(l3min, l3max, num=48)\n gxv = np.linspace(gxmin, gxmax, num=48)\n p = multiprocessing.Pool()\n f = open(file_name, 'w+')\n line = '|l1--l2--l3--gx--minima--mass1--mass2--stable|'\n f.write(line+'\\n')\n f.write('-'*90+'\\n')\n f.close()\n for l1 in l1v:\n for l2 in l2v:\n start_time_loop = time.time()\n params = cartesian((l1, -l2, l3v, gxv))\n print params.shape\n p.map(CheckCouplings, params)\n print(\"--- Loop has taken: %s seconds ---\" % (time.time() - start_time_loop))", "def gaussp(y1, y2, n):\n\n # First check for trivial or stupid requests\n if n <= 0:\n raise ValueError(\"Zero (or less) grid points is stupid. 
Stop it.\")\n if n == 1:\n r = np.array([0.5*(y2 + y1)])\n wt = np.array([y2 - y1])\n return r, wt\n N_pi = 3.14159265358979323844 # Fortran uses stupid pi because of course it does\n EPS = 1e-14 # Desired accuracy\n n_sav = -1\n\n if n != n_sav:\n n_sav = n\n m = n\n m, r, wt = GridGenerator.gausspp(m)\n m = 0\n\n if m != n:\n m = int((n+1)/2) # Care, integer division\n x = np.zeros((2*m)) # Working r, not returned\n w = np.zeros((2*m)) # Working wt, not returned\n r = np.zeros((2*m))\n wt = np.zeros((2*m))\n for i in range(m):\n r[i] = N_pi*(i+0.75)/(n+0.5)\n r = np.cos(r)\n\n for i in range(m):\n z = r[i]\n z1 = 1e20 # Arbitrary large number to ensure at least 1 loop\n while abs(z-z1) > EPS:\n p1 = 1.0\n p2 = 0.0\n for j in range(n):\n p3 = p2\n p2 = p1\n p1 = ((2*(j + 1) - 1)*z*p2 - j*p3)/(j + 1)\n pp = n*(z*p1 - p2)/(z*z - 1.0)\n z1 = z\n z = z1 - p1/pp\n x[i] = -z\n x[n - (i + 1)] = z\n w[i] = 2.0/((1.0 - z*z)*pp*pp)\n w[n - (i + 1)] = w[i]\n\n for i in range(n):\n fact = 0.5*(y2-y1)\n r[i] = y1 + fact*(x[i] + 1.0)\n wt[i] = fact*w[i]\n\n return n, r, wt", "def multiproc_vca(subcube_locs,channels,output_loc,fig_loc,dimensions):\n\t\n\twith schwimmbad.MultiPool() as pool:\n\t\tprint('started multi processing')\n\t\tprint(datetime.datetime.now())\n\n\t\t#create the lists for multiprocessing\n\t\t#vcacube=[f'{subcube_locs}_{dimensions}x{dimensions}_x{i}_y{j}.fits' for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tvcacube=[f'{k}_{dimensions}x{dimensions}_x{i}_y{j}.fits' for k in subcube_locs for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tchansamps=[channels for j in np.arange(0,dimensions) for k in subcube_locs for i in np.arange(0,dimensions)]\n\t\t#arrayloc=[f'{output_loc}_{dimensions}x{dimensions}_x{i}_y{j}' for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tarrayloc=[f'{k}_{dimensions}x{dimensions}_x{i}_y{j}' for k in output_loc for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\t#figloc=[f'{fig_loc}_{dimensions}x{dimensions}_x{i}_y{j}' for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tfigloc=[f'{k}_{dimensions}x{dimensions}_x{i}_y{j}' for k in fig_loc for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\n\n\t\tinputs=list(zip(vcacube,chansamps,arrayloc,figloc))\n\t\tprint(f'THESE ARE THE INPUTS FOR MULTIPROCESSING:{inputs}')\n\n\t\tout = list(pool.map(do_vca, inputs))\n\t\tprint('finished multiprocessing')\n\t\tprint(datetime.datetime.now())\n\tprint(out)", "def cpp_calcJoinMatrix(self, noofRows, noofRows2, noofRows3, noofRows4, noofRows5):\n return _patchExtractor.patchExtractor_cpp_calcJoinMatrix(self, noofRows, noofRows2, noofRows3, noofRows4, noofRows5)", "def compute_chessboard_squares(e1, e2):\n\n squares = []\n\n # equal in an ideal world\n square_dim_x = int(math.ceil((e2.x - e1.x)/8.0))\n square_dim_y = int(math.ceil((e2.y - e1.y)/8.0))\n\n y = e1.y\n while y < e2.y - square_dim_y/2:\n x = e1.x\n while x < e2.x - square_dim_x/2:\n extr_x = min(x + square_dim_x, e2.x)\n extr_y = min(y + square_dim_y, e2.y)\n\n sq = (Point(x, y), Point(extr_x, extr_y))\n squares.append(sq)\n\n x += square_dim_x\n\n y += square_dim_y\n\n return squares", "def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n 
plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()", "def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()", "def auto_join(blue_range, red_range, coadd_consecutive=True, do_joins=False):\r\n\r\n coadd_map = []\r\n current_obj_ra = ''\r\n current_blue = []\r\n current_red = []\r\n objects = []\r\n b = blue_range[0]\r\n r = red_range[0]\r\n while (b <= blue_range[1]) or (r <= red_range[1]):\r\n print b, r\r\n\r\n def load_hdrpars(i,side='blue'):\r\n filename = '%s%04d.fits' % (side, i)\r\n file_exists = os.path.exists(filename)\r\n if file_exists:\r\n hdr = pyfits.getheader(filename)\r\n # just use the nearest arcsec--small telescope drifts otherwise\r\n return True, hdr['OBJECT'],hdr['RA'][:8],hdr['DEC'][:9]\r\n else:\r\n return False, None, None, None\r\n\r\n bfileexists, bobj, bra, bdec = load_hdrpars(b,side='blue')\r\n rfileexists, robj, rra, rdec = load_hdrpars(r,side='red')\r\n\r\n if bfileexists and rfileexists and (bra == rra) and (bdec == rdec):\r\n # both sides observe same object\r\n if (rra == current_obj_ra) and coadd_consecutive:\r\n # which matches the previous object\r\n current_blue.append(b)\r\n current_red.append(r)\r\n current_obj = robj\r\n else:\r\n # both sides observe a new object\r\n if current_obj_ra != '': # starting the list\r\n coadd_map.append((current_blue, current_red))\r\n current_obj = robj\r\n objects.append(current_obj)\r\n current_blue = [b]\r\n current_red = [r]\r\n current_obj_ra = rra\r\n b+=1\r\n r+=1\r\n else:\r\n # both sides observe different objects (or one side is missing)\r\n if rfileexists and (rra == current_obj_ra) and coadd_consecutive:\r\n current_red.append(r)\r\n r+=1\r\n elif bfileexists and (bra == current_obj_ra) and coadd_consecutive:\r\n current_blue.append(b)\r\n b+=1\r\n else:\r\n # some other state. 
save last object\r\n coadd_map.append((current_blue, current_red))\r\n objects.append(current_obj)\r\n\r\n # peek ahead\r\n _, nbobj, nbra, nbdec = load_hdrpars(b+1,side='blue')\r\n _, nrobj, nrra, nrdec = load_hdrpars(r+1,side='red')\r\n\r\n # does current blue match either of next objects?\r\n if bfileexists:\r\n if (bra != nbra) and (bra != nrra):\r\n # no--write it out by itself\r\n coadd_map.append(([b],[]))\r\n current_blue = []\r\n objects.append(bobj)\r\n else:\r\n # save and continue\r\n current_blue = [b]\r\n current_obj = bobj\r\n b+=1\r\n\r\n # does current red match either of next objects?\r\n if rfileexists:\r\n if (rra != nbra) and (rra != nrra):\r\n # no--write it out by itself\r\n coadd_map.append(([],[r]))\r\n current_red = []\r\n objects.append(robj)\r\n else:\r\n # save and continue\r\n current_red = [r]\r\n current_obj = robj\r\n current_ra = rra\r\n r+=1\r\n\r\n # save final object\r\n coadd_map.append((current_blue, current_red))\r\n\r\n for x in zip(objects, coadd_map):\r\n print x\r\n if do_joins:\r\n for lists in coadd_map:\r\n combine_sides(lists[0], lists[1],splot='no')\r\n\r\n return coadd_map, objects", "def test_closure():\n x = torch.randn(300_000, 3)\n Ys = [o3.spherical_harmonics(l, x, normalize=True) for l in range(0, 3 + 1)]\n for l1, Y1 in enumerate(Ys):\n for l2, Y2 in enumerate(Ys):\n m = Y1[:, :, None] * Y2[:, None, :]\n m = m.mean(0) * 4 * math.pi\n if l1 == l2:\n i = torch.eye(2 * l1 + 1)\n assert (m - i).abs().max() < 0.01\n else:\n assert m.abs().max() < 0.01", "def generate_grid():\n y_offset = -10\n for a in range(20):\n # Line 1\n # Adds offset to the x position of the squares\n x_offset = 10\n for b in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for c in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for d in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40\n # Line 2 (needs 2 lines because the offset of each line)\n # Adds offset to the x position of the squares\n x_offset = 30\n for e in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for f in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for g in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40", "def fight(self):\n for j in range(self.num):\n for i in range(self.num):\n self.genepool[0][j].fight(self.genepool[1][i],self.gameOne,self.len)\n self.genepool[1][j].fight(self.genepool[0][i],self.gameTwo,self.len)\n for i in range(self.num):\n self.genepool[0][i].fitness /= self.num\n self.genepool[1][i].fitness /= self.num", "def _format_joining_functions(self):\n ## TODO: Extend to n possible neighs_info elements\n if self.staticneighs:\n if self.ifdistance:\n self.join_neighs_and = join_neighsinfo_AND_static_dist\n self.join_neighs_or = join_neighsinfo_OR_static_dist\n self.join_neighs_xor = join_neighsinfo_XOR_static_dist\n else:\n self.join_neighs_and = join_neighsinfo_AND_static_notdist\n self.join_neighs_or = join_neighsinfo_OR_static_notdist\n self.join_neighs_xor = join_neighsinfo_XOR_static_notdist\n else:\n if self.ifdistance:\n self.join_neighs_and = join_neighsinfo_AND_notstatic_dist\n self.join_neighs_or = join_neighsinfo_OR_notstatic_dist\n self.join_neighs_xor = join_neighsinfo_XOR_notstatic_dist\n else:\n self.join_neighs_and = join_neighsinfo_AND_notstatic_notdist\n self.join_neighs_or = 
join_neighsinfo_OR_notstatic_notdist\n self.join_neighs_xor = join_neighsinfo_XOR_notstatic_notdist", "def test_4():\n\n # Functions wrapped by agents\n def f(in_streams, out_streams):\n identity(in_streams[0], out_streams[0])\n\n def g(in_streams, out_streams):\n multiply(in_streams[0], out_streams[0],\n multiplicand=2)\n\n def h(in_streams, out_streams):\n square(in_streams[0], out_streams[0])\n\n def m(in_streams, out_streams):\n s = Stream('s')\n sum_numbers(in_streams, s)\n print_stream(s, name='s')\n\n processes = \\\n {\n 'source_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n },\n }\n },\n 'multiply_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': g,\n 'sources': {}\n },\n 'square_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': h,\n 'sources': {}\n },\n 'merge_process':\n {'in_stream_names_types': [('in_multiply', 'i'),\n ('in_square', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': m,\n 'sources': {}\n }\n }\n \n connections = \\\n {\n 'source_process' :\n {\n 'out' : [('multiply_process', 'in'), ('square_process', 'in')],\n 'acceleration' : [('source_process', 'in')]\n },\n 'multiply_process':\n {\n 'out' : [('merge_process', 'in_multiply')]\n },\n 'square_process':\n {\n 'out' : [('merge_process', 'in_square')]\n },\n 'merge_process':\n {\n }\n }\n\n multicore(processes, connections)", "def test_combine():\n # Create 4 square arrays:\n # 0 1 2 3\n # -----------\n # 00 11 22 33\n # 00 11 22 33\n tiles = [np.array(_square(i)) for i in range(4)]\n\n with pytest.raises(ValueError):\n _combine_tiles(tiles[0], tiles[1], tiles[2]) # Too few values.\n\n with pytest.raises(ValueError):\n _combine_tiles(tiles[0], None, None, None, None) # Too many values.\n\n # Combine them the 4 major ways:\n\n # case1: corner\n # 0X\n # XX\n case1 = _combine_tiles(tiles[0], None, None, None)\n assert case1.shape == (2, 2)\n assert (case1 == tiles[0]).all()\n\n # case2: bottom edge\n # 01\n # XX\n case2 = _combine_tiles(tiles[0], tiles[1], None, None)\n assert case2.shape == (2, 4)\n assert (case2[0:2, 0:2] == tiles[0]).all()\n assert (case2[0:2, 3:5] == tiles[1]).all()\n\n # case3: right edge\n # 0X\n # 2X\n case3 = _combine_tiles(tiles[0], None, tiles[2], None)\n assert case3.shape == (4, 2)\n assert (case3[0:2, 0:2] == tiles[0]).all()\n assert (case3[3:5, 0:2] == tiles[2]).all()\n\n # case4: interior\n # 01\n # 23\n case4 = _combine_tiles(tiles[0], tiles[1], tiles[2], tiles[3])\n assert case4.shape == (4, 4)\n assert (case4[0:2, 0:2] == tiles[0]).all()\n assert (case4[0:2, 3:5] == tiles[1]).all()\n assert (case4[3:5, 0:2] == tiles[2]).all()\n assert (case4[3:5, 3:5] == tiles[3]).all()", "def _general_link(clusters, i, j, method):\n for k in range(len(clusters)):\n if k != i and k != j:\n if method.__name__ == \"ward_update\":\n new_distance = method(clusters[i,k], clusters[j,k], k)\n else:\n new_distance = method(clusters[i,k], clusters[j,k])\n clusters[i,k] = new_distance\n clusters[k,i] = new_distance\n return clusters", "def test_assembly_inner_product_1_forms(self):\n func_space_lob = FunctionSpace(self.mesh, '1-lobatto', self.p)\n func_space_gauss = FunctionSpace(self.mesh, '1-gauss', self.p)\n func_space_extgauss = FunctionSpace(self.mesh, '1-ext_gauss', self.p)\n\n basis_lob = BasisForm(func_space_lob)\n 
basis_lob.quad_grid = 'gauss'\n M_lob = inner(basis_lob, basis_lob)\n\n basis_gauss = BasisForm(func_space_gauss)\n basis_gauss.quad_grid = 'lobatto'\n M_gauss = inner(basis_gauss, basis_gauss)\n\n basis_ext_gauss = BasisForm(func_space_extgauss)\n basis_ext_gauss.quad_grid = 'lobatto'\n M_extgauss = inner(basis_ext_gauss, basis_ext_gauss)\n\n M_lob_ass_ref = assemble_slow(self.mesh, M_lob, func_space_lob.dof_map.dof_map,\n func_space_lob.dof_map.dof_map)\n M_gauss_ass_ref = assemble_slow(self.mesh, M_gauss, func_space_gauss.dof_map.dof_map,\n func_space_gauss.dof_map.dof_map)\n M_extgauss_ass_ref = assemble_slow(\n self.mesh, M_extgauss, func_space_extgauss.dof_map.dof_map_internal, func_space_extgauss.dof_map.dof_map_internal)\n\n M_lob_ass = assemble(M_lob, func_space_lob, func_space_lob).toarray()\n M_gauss_ass = assemble(M_gauss, func_space_gauss, func_space_gauss).toarray()\n M_extgauss_ass = assemble(M_extgauss, func_space_extgauss,\n func_space_extgauss).toarray()\n\n npt.assert_array_almost_equal(M_lob_ass_ref, M_lob_ass)\n npt.assert_array_almost_equal(M_gauss_ass_ref, M_gauss_ass)\n npt.assert_array_almost_equal(M_extgauss_ass_ref, M_extgauss_ass)", "def join(self, batches: List[Batch], outpath: str) -> None:\n if self.threads == 1:\n super().join(batches, outpath)\n else:\n self.__parallel_join(batches, outpath)", "def square(side):\n rectangle(side,side)", "def run_split_with_solids(self,mc):\n top, bot = self.outs\n feed = self.ins[0]\n top.copy_like(feed)\n bot.copy_like(top)\n top_mass = top.mass\n F_mass_ins = sum(top_mass*self.split)\n F_mass_sol = top.F_mass - F_mass_ins\n F_mass_wat = top.imass['Water']\n x_sol = mc*F_mass_ins/(F_mass_wat-mc*F_mass_sol)\n self.split[self.split==0] = x_sol\n top_mass[:] *= self.split\n bot.mass[:] -= top_mass" ]
[ "0.7624191", "0.5538901", "0.5165043", "0.50714666", "0.50639325", "0.5047271", "0.5002455", "0.49778813", "0.4968627", "0.49685135", "0.495568", "0.49273586", "0.48960102", "0.48864695", "0.48659024", "0.48147792", "0.48095623", "0.48085663", "0.4803549", "0.4795789", "0.4791983", "0.47913542", "0.47788063", "0.47756958", "0.47683707", "0.4766202", "0.47653475", "0.47649086", "0.47621554", "0.47607732", "0.47602662", "0.47516447", "0.4740979", "0.47319776", "0.47298658", "0.47254556", "0.4719377", "0.4710163", "0.4709591", "0.47094038", "0.47089627", "0.47050118", "0.4697713", "0.46899602", "0.4689379", "0.46835423", "0.4681663", "0.46795008", "0.46789244", "0.46719807", "0.46696204", "0.4663903", "0.46596223", "0.46553975", "0.46549532", "0.46519977", "0.46477568", "0.46429408", "0.46417028", "0.4641305", "0.46410072", "0.4636426", "0.46362668", "0.46283057", "0.4626686", "0.4621757", "0.46185586", "0.46123978", "0.4606156", "0.46053132", "0.46030965", "0.46020955", "0.45992854", "0.45966044", "0.4593391", "0.45923984", "0.45907626", "0.4587361", "0.4584178", "0.4583306", "0.45703098", "0.45670953", "0.4566498", "0.45660076", "0.45631924", "0.45591065", "0.45591065", "0.45585123", "0.45579913", "0.45530605", "0.4552559", "0.45522082", "0.45504895", "0.45499313", "0.4547563", "0.45454785", "0.45443395", "0.45443138", "0.45423996" ]
0.7545948
2
Adds the keys 'logits' and 'probs' to the end points dictionary of ResNet50v2.
def _get_updated_endpoints(original_end_points, name):
    end_points = dict(original_end_points)
    end_points['logits'] = tf.squeeze(end_points[name], [1, 2])
    end_points['probs'] = tf.nn.softmax(end_points['logits'])
    return end_points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }\n n2 = { 'edges': [ n1, self.next_insert ], 'pred': n1 }\n self.next_insert['pred'] = n2\n self.next_insert = n2\n self.nodect += 2", "def init_output_dict(self):\n return {\n \"outputs\": torch.FloatTensor(),\n \"pred_probs\": torch.FloatTensor(),\n \"labels\": torch.LongTensor(),\n }", "def postprocess(self, prediction_dict):\r\n #三个通道的网络需要全连接层融合\r\n\r\n eyeFace_logits = prediction_dict['eyeFace_logits']\r\n eyeFace_logits = tf.nn.softmax(eyeFace_logits)\r\n logits = eyeFace_logits\r\n classes = tf.argmax(logits, 1)\r\n postprecessed_dict = {'classes': classes}\r\n return postprecessed_dict", "def feed_dict(self):\n return {self.lr_tensor: self.lr()}", "def resnet_endpoints(model):\n graph = tf.get_default_graph()\n scope = _get_resnet_scope()\n end_points = {}\n tensors = ['initial_conv', 'initial_max_pool', 'pre_final_pool',\n 'final_reduce_mean', 'final_dense']\n tensors += [\n 'block_layer{}'.format(i + 1) for i in range(len(model.block_sizes))]\n for name in tensors:\n tensor = graph.get_tensor_by_name('{}{}:0'.format(scope, name))\n if len(tensor.shape) == 4:\n tensor = _model_output(tensor, model.data_format)\n end_points[name] = tensor\n return end_points", "def inception_resnet_v2(inputs,\n reuse=None,\n scope='InceptionResnetV2'):\n end_points = {}\n\n with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n\n # 149 x 149 x 32\n net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n end_points['Conv2d_1a_3x3'] = net\n # 147 x 147 x 32\n net = slim.conv2d(net, 32, 3, padding='VALID',\n scope='Conv2d_2a_3x3')\n end_points['Conv2d_2a_3x3'] = net\n # 147 x 147 x 64\n net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')\n end_points['Conv2d_2b_3x3'] = net\n # 73 x 73 x 64\n net = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_3a_3x3')\n end_points['MaxPool_3a_3x3'] = net\n # 73 x 73 x 80\n net = slim.conv2d(net, 80, 1, padding='VALID',\n scope='Conv2d_3b_1x1')\n end_points['Conv2d_3b_1x1'] = net\n # 71 x 71 x 192\n net = slim.conv2d(net, 192, 3, padding='VALID',\n scope='Conv2d_4a_3x3')\n end_points['Conv2d_4a_3x3'] = net\n # 35 x 35 x 192\n net = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_5a_3x3')\n end_points['MaxPool_5a_3x3'] = net\n\n # 35 x 35 x 320\n with tf.variable_scope('Mixed_5b'):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,\n scope='Conv2d_0b_5x5')\n with tf.variable_scope('Branch_2'):\n tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')\n tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,\n scope='Conv2d_0b_3x3')\n tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,\n scope='Conv2d_0c_3x3')\n with tf.variable_scope('Branch_3'):\n tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',\n scope='AvgPool_0a_3x3')\n tower_pool_1 = slim.conv2d(tower_pool, 64, 1,\n scope='Conv2d_0b_1x1')\n net = tf.concat(axis=3, values=[tower_conv, tower_conv1_1,\n tower_conv2_2, tower_pool_1])\n\n end_points['Mixed_5b'] = net\n net = slim.repeat(net, 10, block35, scale=0.17)\n\n # 17 x 17 x 1024\n with tf.variable_scope('Mixed_6a'):\n with 
tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 384, 3, stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,\n scope='Conv2d_0b_3x3')\n tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,\n stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat(axis=3, values=[tower_conv, tower_conv1_2, tower_pool])\n\n end_points['Mixed_6a'] = net\n net = slim.repeat(net, 20, block17, scale=0.10)\n\n with tf.variable_scope('Mixed_7a'):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,\n scope='Conv2d_0b_3x3')\n tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_3'):\n tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat(axis=3, values=[tower_conv_1, tower_conv1_1,\n tower_conv2_2, tower_pool])\n\n end_points['Mixed_7a'] = net\n\n net = slim.repeat(net, 9, block8, scale=0.20)\n net = block8(net, activation_fn=None)\n \n # GVH: Not sure if we want or need this convolution\n # 8 x 8 x 2080\n net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')\n end_points['Conv2d_7b_1x1'] = net\n \n # 8 x 8 x 1536\n return net, end_points", "def resnet_v2_v1net_config():\n config = ConfigDict()\n config.image_size = (299, 299)\n config.resnet_depth = 50\n config.ckpt_dir = \"pretrained_nets/resnet_v2_%s\" % config.resnet_depth\n config.num_classes = 1001\n config.add_v1net = True\n config.timesteps = 7\n config.v1net_filters = 32\n config.v1net_kernel_size = 5\n return config", "def generate_update_dict(self, input_data, input_labels=None, batch_step=0):\n update_dict = super(MlpModel, self).generate_update_dict(input_data, input_labels, batch_step)\n feed_dict = self.get_feed_dict(input_data, input_labels)\n sess = tf.compat.v1.get_default_session()\n train_on_adversarial = feed_dict[self.train_on_adversarial]\n if(train_on_adversarial):\n adv_feed_dict = feed_dict.copy()\n adv_feed_dict[self.use_adv_input] = True\n nadv_feed_dict = feed_dict.copy()\n nadv_feed_dict[self.use_adv_input] = False\n current_step = np.array(self.global_step.eval())\n logits_vals = sess.run(self.get_encodings(), feed_dict)\n logits_vals_max = np.array(logits_vals.max())\n logits_frac_act = np.array(np.count_nonzero(logits_vals) / float(logits_vals.size))\n stat_dict = {\"global_batch_index\":current_step,\n \"batch_step\":batch_step,\n \"number_of_batch_steps\":self.params.schedule[self.sched_idx][\"num_batches\"],\n \"schedule_index\":self.sched_idx,\n \"logits_max\":logits_vals_max,\n \"logits_frac_active\":logits_frac_act}\n if(train_on_adversarial):\n adv_accuracy = np.array(self.accuracy.eval(adv_feed_dict))\n nadv_accuracy = np.array(self.accuracy.eval(nadv_feed_dict))\n adv_loss = 
np.array(self.get_total_loss().eval(adv_feed_dict))\n nadv_loss = np.array(self.get_total_loss().eval(nadv_feed_dict))\n stat_dict[\"accuracy_adv\"] = adv_accuracy\n stat_dict[\"accuracy_nadv\"] = nadv_accuracy\n stat_dict[\"total_loss_adv\"] = adv_loss\n stat_dict[\"total_loss_nadv\"] = nadv_loss\n else:\n accuracy = np.array(self.accuracy.eval(feed_dict))\n total_loss = np.array(self.get_total_loss().eval(feed_dict))\n stat_dict[\"accuracy\"] = accuracy\n stat_dict[\"total_loss\"] = total_loss\n update_dict.update(stat_dict) #stat_dict overwrites\n eval_list = []\n grad_name_list = []\n learning_rate_list = []\n for w_idx, weight_grad_var in enumerate(self.grads_and_vars[self.sched_idx]):\n eval_list.append(weight_grad_var[0][0]) # [grad(0) or var(1)][value(0) or name(1)]\n grad_name = weight_grad_var[0][1].name.split('/')[1].split(':')[0] # 2nd is np.split\n grad_name_list.append(grad_name)\n learning_rate_list.append(self.learning_rates[self.sched_idx][w_idx])\n stat_dict = {}\n out_vals = tf.compat.v1.get_default_session().run(eval_list, feed_dict)\n out_lr = tf.compat.v1.get_default_session().run(learning_rate_list, feed_dict)\n for grad, name, lr in zip(out_vals, grad_name_list, out_lr):\n grad_max = np.array(grad.max())\n grad_min = np.array(grad.min())\n grad_mean = np.mean(np.array(grad))\n stat_dict[name+\"_grad_max_mean_min\"] = [grad_max, grad_mean, grad_min]\n stat_dict[name+\"_learning_rate\"] = lr\n update_dict.update(stat_dict) #stat_dict overwrites for same keys\n return update_dict", "def loss_and_target(self, point_pred: Tensor, rel_roi_points: Tensor,\n sampling_results: List[SamplingResult],\n batch_gt_instances: InstanceList,\n cfg: ConfigType) -> dict:\n rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n\n point_target = self.get_targets(rois, rel_roi_points, sampling_results,\n batch_gt_instances, cfg)\n if self.class_agnostic:\n loss_point = self.loss_point(point_pred, point_target,\n torch.zeros_like(pos_labels))\n else:\n loss_point = self.loss_point(point_pred, point_target, pos_labels)\n\n return dict(loss_point=loss_point, point_target=point_target)", "def _create_output_alternatives(self, predictions):\n return {self.head_name: (self._problem_type, predictions)}", "def lightened_v2(inputs, is_training=True,\n dropout_keep_prob=0.8,\n reuse=None,\n scope='LightenedV1'):\n end_points = {}\n \n with tf.variable_scope(scope, 'LightenedV1', [inputs], reuse=reuse):\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n \n endpoints = {}\n \n net=conv(inputs, 48, 9, stride=1, padding='VALID', scope='conv1_9x9')\n end_points['conv1_9x9'] = net\n net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='pool1')\n end_points['pool1'] = net\n \n net=conv(net, 96, 5, stride=1, padding='VALID', scope='conv2_5x5')\n end_points['conv2_5x5'] = net\n net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='pool2')\n end_points['pool2'] = net\n\n net=conv(net, 128, 5, stride=1, padding='VALID', scope='conv3_5x5')\n end_points['conv3_5x5'] = net\n net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='pool3')\n end_points['pool3'] = net\n\n net=conv(net, 192, 4, stride=1, padding='VALID', scope='conv4_4x4')\n end_points['conv4_5x5'] = net\n net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='pool4')\n end_points['pool4'] = net\n\n 
with tf.variable_scope('Logits'):\n net = slim.flatten(net)\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='Dropout') \n endpoints['PreLogitsFlatten'] = net\n \n return net, end_points", "def init_probability_dict(self):\n for x in xrange(0,10):\n self.class_probabilities[x] = self.init_probability_2d()", "def _set_resnet_arg_scope(self):\n vs_initializer = tf.keras.initializers.VarianceScaling(2.0)\n l2_regularizer = tf.keras.regularizers.l2(self.config.GENERATOR_WEIGHT_DECAY)\n for layer in self.resnet50V2.layers:\n if isinstance(layer, layers.Conv2D):\n # original implementations slim `resnet_arg_scope` additionally sets\n # `normalizer_fn` and `normalizer_params` which in TF 2.0 need to be implemented\n # as own layers. This is not possible using keras ResNet50V2 application.\n # Nevertheless this is not needed as training seems to be likely stable.\n # See https://www.tensorflow.org/guide/migrate#a_note_on_slim_contriblayers for more\n # migration insights\n setattr(layer, 'padding', 'same')\n setattr(layer, 'kernel_initializer', vs_initializer)\n setattr(layer, 'kernel_regularizer', l2_regularizer)\n if isinstance(layer, layers.BatchNormalization):\n setattr(layer, 'momentum', 0.997)\n setattr(layer, 'epsilon', 1e-5)\n if isinstance(layer, layers.MaxPooling2D):\n setattr(layer, 'padding', 'same')", "def eval_additional_scores(self, **kwargs):\n self.model.eval()\n self.likelihood.eval()\n\n X_train_torch = torch.from_numpy(kwargs[\"X_train\"]).to(self.device)\n y_train_torch = torch.from_numpy(kwargs[\"y_train\"]).to(self.device)\n mll = gpytorch.mlls.VariationalELBO(self.likelihood, self.model, num_data=y_train_torch.numel())\n\n with torch.no_grad(), gpytorch.settings.num_likelihood_samples(self.num_likelihood_samples):\n f_pred = self.model(X_train_torch)\n elbo = mll(f_pred, y_train_torch).item()\n\n return {\n \"elbo\": elbo\n }", "def build_feed_dict(self, input_frames, gt_output_frames, generator):\n feed_dict = {}\n batch_size = np.shape(gt_output_frames)[0]\n\n ##\n # Get generated frames from GeneratorModel\n ##\n\n g_feed_dict = {generator.input_frames_train: input_frames,\n generator.gt_frames_train: gt_output_frames}\n g_scale_preds = self.sess.run(generator.scale_preds_train, feed_dict=g_feed_dict)\n\n ##\n # Create discriminator feed dict\n ##\n for scale_num in xrange(self.num_scale_nets):\n scale_net = self.scale_nets[scale_num]\n\n # resize gt_output_frames\n scaled_gt_output_frames = np.empty([batch_size, scale_net.height, scale_net.width, 3])\n for i, img in enumerate(gt_output_frames):\n\t\t# for skimage.transform.resize, images need to be in range [0, 1], so normalize to\n # [0, 1] before resize and back to [-1, 1] after\n sknorm_img = (img / 2) + 0.5\n\n\n # https://github.com/dyelax/Adversarial_Video_Generation/issues/18\n sknorm_img = np.minimum(sknorm_img, 1)\n sknorm_img = np.maximum(sknorm_img, 0)\n\n\n\n resized_frame = resize(sknorm_img, [scale_net.height, scale_net.width, 3])\n scaled_gt_output_frames[i] = (resized_frame - 0.5) * 2\n\n # combine with resized gt_output_frames to get inputs for prediction\n scaled_input_frames = np.concatenate([g_scale_preds[scale_num],\n scaled_gt_output_frames])\n\n # convert to np array and add to feed_dict\n feed_dict[scale_net.input_frames] = scaled_input_frames\n\n # add labels for each image to feed_dict\n batch_size = np.shape(input_frames)[0]\n feed_dict[self.labels] = np.concatenate([np.zeros([batch_size, 1]),\n np.ones([batch_size, 1])])\n\n return feed_dict", "def 
build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def fprop(self, x):\n\n if x is self.x:\n return self.end_points\n\n else:\n with slim.arg_scope(arg_scopes_map['resnet_v2_50']()):\n net, end_points = networks_map['resnet_v2_50'](\n x, num_classes=self.num_classes,\n is_training=False, reuse=tf.AUTO_REUSE)\n\n return _get_updated_endpoints(end_points, 'resnet_v2_50/logits')", "def rl_modelrl_ae_l2_base():\n hparams = rl_modelrl_ae_base()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def __init__(self, x, num_classes=15, is_training=False):\n\n super(resnet_v2_50, self).__init__()\n\n self.x = x\n self.num_classes = num_classes\n\n # populating the tensorflow graph\n with slim.arg_scope(arg_scopes_map['resnet_v2_50']()):\n net, end_points = networks_map['resnet_v2_50'](\n x, num_classes=num_classes,\n is_training=is_training, reuse=None)\n\n self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_50/logits')\n self.variables_to_restore = slim.get_variables_to_restore(exclude=[])", "def on_predict_end(self, logs=None):", "def on_predict_end(self, logs=None):", "def rl_modelrl_l2_base():\n hparams = rl_modelrl_base()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def add_metrics_to_db(self) -> None:\n\n model = {\n 'id': 'model1',\n 'name': 'Housing Price Prediction',\n 'metrics': {\n 'mean_squared_error': mean_squared_error(self._y_test, 
self._predictions),\n 'mean_absolute_error': mean_absolute_error(self._y_test, self._predictions),\n 'r2_score': r2_score(self._y_test, self._predictions)\n }\n }\n\n self._db.add_model(model)", "def __init__(self, players, prob_end, game, deterministic_cache):\n super(ProbEndRoundRobinMatches, self).__init__(\n players, turns=float(\"inf\"), game=game,\n deterministic_cache=deterministic_cache)\n self.deterministic_cache.mutable = False\n self.prob_end = prob_end", "def add_prediction_endpoint(self, endpoint_id, saved_model_id):\n self.settings[\"endpoints\"].append({\n \"id\" : endpoint_id,\n \"type\" : \"STD_PREDICTION\",\n \"modelRef\": saved_model_id\n })", "def inception_resnet_v1(inputs,\r\n is_training=True,\r\n dropout_keep_prob=0.8,\r\n bottleneck_layer_size=128,\r\n reuse=None,\r\n scope='InceptionResnetV1'):\r\n end_points = {}\r\n\r\n with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):\r\n with slim.arg_scope([slim.batch_norm, slim.dropout],\r\n is_training=is_training):\r\n with slim.arg_scope(\r\n [slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\r\n stride=1,\r\n padding='SAME'):\r\n\r\n # stem\r\n net = stem(inputs)\r\n end_points['stem_out'] = net\r\n\r\n # 5 x Inception-resnet-A\r\n net = slim.repeat(\r\n net,\r\n 5,\r\n inception_resnet_a,\r\n scale=0.17,\r\n scope=\"inception_resnet_a\")\r\n end_points['inception_resnet_a_out'] = net\r\n\r\n # Reduction-A\r\n with tf.variable_scope('reduction_a'):\r\n net = reduction_a(net, 192, 192, 256, 384)\r\n end_points['reduction_a_out'] = net\r\n\r\n # 10 x Inception-Resnet-B\r\n net = slim.repeat(\r\n net,\r\n 10,\r\n inception_resnet_b,\r\n scale=0.10,\r\n scope=\"inception_resnet_b\")\r\n end_points['inception_resnet_b_out'] = net\r\n\r\n # Reduction-B\r\n with tf.variable_scope('reduction_b'):\r\n net = reduction_b(net)\r\n end_points['reduction_b_out'] = net\r\n\r\n # 5 x Inception-Resnet-C\r\n net = slim.repeat(\r\n net,\r\n 5,\r\n inception_resnet_c,\r\n scale=0.20,\r\n scope=\"inception_resnet_c\")\r\n end_points['inception_resnet_c_out'] = net\r\n\r\n # Average Pooling层,输出为8×8×1792\r\n net = slim.avg_pool2d(\r\n net,\r\n net.get_shape()[1:3],\r\n padding='VALID',\r\n scope='avgpool_8x8')\r\n\r\n # 扁平除了batch_size维度的其它维度。使输出变为:[batch_size, ...]\r\n net = slim.flatten(net)\r\n\r\n # dropout层\r\n net = slim.dropout(\r\n net, dropout_keep_prob, is_training=False, scope='Dropout')\r\n end_points['PreLogitsFlatten'] = net\r\n\r\n # 全链接层。输出为batch_size×128\r\n net = slim.fully_connected(\r\n net,\r\n bottleneck_layer_size,\r\n activation_fn=None,\r\n scope='logits',\r\n reuse=False)\r\n\r\n return net", "def on_predict_end(self, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None):\n pass", "def loss_functions(self) -> dict:\n tmp_dict = dict()\n tmp_dict['From_Root'] = (('Cov', self.log_post_cov, self.constraint_cov),\n ('Beta', self.log_post_beta, self.constraint_sigma),\n ('Lambda', self.log_post_lambda, self.constraint_sigma),\n ('Psi', self.log_post_psi, self.constraint_psi),\n ('Theta', self.log_post_theta, self.constraint_theta),\n ('Tree', self.log_post_tree, self.constraint_tree))\n\n tmp_dict['Likelihood'] = (self.log_likelihood, ('Psi',\n 'Beta',\n 'Theta',\n 'Lambda'))\n return tmp_dict", "def model_2_parameters(num_features, num_classes):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n \n return parameters", "def update_predictions(self, context):\n x, y, o = context.get_predictions()\n self.x_eval += x\n self.y_eval += y\n self.o_eval += 
o\n self.write_predictions(o)", "def target_multi_objective2(\n config: Configuration,\n seed: int,\n # instance: str,\n # budget: float,\n) -> dict[str, float]:\n return {\"cost1\": seed, \"cost2\": seed}", "def on_epoch_end(self, epoch, logs={}):\n for (k, v) in logs.items():\n l = self.H.get(k, [])\n l.append(v)\n self.H[k] = l \n # check to see if the training history should be serialized to file\n if self.jsonPath is not None:\n f = open(self.jsonPath, \"w\")\n # Encodes a Python object as a JSON string.\n f.write(json.dumps(self.H))\n f.close() \n # ensure at least two epochs have passed before plotting\n # (epoch starts at zero)\n if len(self.H[\"loss\"]) > 1:\n N = np.arange(0, len(self.H[\"loss\"]))\n plt.style.use(\"ggplot\")\n plt.figure()\n plt.plot(N, self.H[\"loss\"], label=\"train_loss\")\n plt.plot(N, self.H[\"val_loss\"], label=\"val_loss\")\n plt.plot(N, self.H[\"acc\"], label=\"train_acc\")\n plt.plot(N, self.H[\"val_acc\"], label=\"val_acc\")\n plt.title(\"Training Loss and Acc [Epoch {}]\".format(\n len(self.H[\"loss\"])))\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Acc\")\n plt.legend()\n plt.savefig(self.figPath)\n plt.close()", "def resnet_model_fn(features, labels, mode, params):\n tf.summary.image('images', features, max_outputs=6)\n\n network = resnet_model.imagenet_resnet_v2(\n params['resnet_size'], _LABEL_CLASSES, params['data_format'])\n if params['optimizer'] == 'kfac':\n with kfac_layer_collection() as lc:\n logits = network(features, mode == tf.estimator.ModeKeys.TRAIN)\n else:\n logits = network(features, mode == tf.estimator.ModeKeys.TRAIN)\n\n predictions = {\n 'classes': tf.argmax(logits, axis=1),\n 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate loss, which includes softmax cross entropy and L2 regularization.\n cross_entropy = tf.losses.softmax_cross_entropy(\n logits=logits, onehot_labels=labels)\n\n # Create a tensor named cross_entropy for logging purposes.\n tf.identity(cross_entropy, name='cross_entropy')\n tf.summary.scalar('cross_entropy', cross_entropy)\n\n # Add weight decay to the loss.\n loss = cross_entropy + _WEIGHT_DECAY * tf.add_n(\n [tf.nn.l2_loss(v) for v in tf.trainable_variables()\n if 'batch_normalization' not in v.name])\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n # Scale the learning rate linearly with the batch size. 
When the batch size\n # is 256, the learning rate should be 0.1.\n initial_learning_rate = params['lr']\n # batches_per_epoch = _NUM_IMAGES['train'] / params['batch_size']\n global_step = tf.train.get_or_create_global_step()\n\n # Multiply the learning rate by 0.1 at 30, 60, 80, and 90 epochs.\n learning_rate = initial_learning_rate\n # Create a tensor named learning_rate for logging purposes.\n tf.identity(learning_rate, name='learning_rate')\n tf.summary.scalar('learning_rate', learning_rate)\n\n if params['optimizer'] == 'meta':\n optimizer = co.MetaHessionFreeOptimizer(learning_rate=learning_rate,\n iter=params['CG_iter'],\n x_use=params['x_use'],\n y_use=params['y_use'],\n d_use=params['d_use'],\n damping=params['damping'])\n elif params['optimizer'] == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=params['beta1'],\n beta2=params['beta2'])\n elif params['optimizer'] == 'RMSprop':\n optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=params['decay'])\n elif params['optimizer'] == 'SGD':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n elif params['optimizer'] == 'momentum':\n optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=params['momentum'])\n elif params['optimizer'] == 'kfac':\n optimizer = tfcb.kfac.optimizer.KfacOptimizer(learning_rate=1,\n cov_ema_decay=0.9,\n damping=learning_rate,\n momentum_type='qmodel',\n momentum=0,\n layer_collection=lc.layer_collection)\n else:\n raise ValueError\n\n # Batch norm requires update_ops to be added as a train_op dependency.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n if params['optimizer'] == 'meta':\n train_op = optimizer.minimize(loss_type='cross_entropy',\n out=logits,\n label=labels,\n input_list=[features],\n global_step=global_step)\n train_hooks = [co.MetaParametersLoadingHook(params['meta_ckpt'])]\n else:\n train_op = optimizer.minimize(loss, global_step=global_step)\n train_hooks = []\n else:\n train_op = None\n train_hooks = []\n\n accuracy = tf.metrics.accuracy(\n tf.argmax(labels, axis=1), predictions['classes'])\n metrics = {'accuracy': accuracy}\n\n # Create a tensor named train_accuracy for logging purposes\n tf.identity(accuracy[1], name='train_accuracy')\n tf.summary.scalar('train_accuracy', accuracy[1])\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=metrics,\n training_hooks=train_hooks)", "def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n val_acc = sum([x['n_correct_pred'] for x in outputs]) / sum(x['n_pred'] for x in outputs)\n tensorboard_logs = {'val_loss': avg_loss, 'val_acc': val_acc}\n return {'val_loss': avg_loss, 'log': tensorboard_logs}", "def process_label(intents, w2v,class_id_startpoint=0):\n class_dict = {}\n label_vec = []\n class_id = class_id_startpoint\n \n for line in intents:\n # check whether all the words in w2v dict\n line=line[0]\n label = line.split(' ')\n for w in label:\n if not w in w2v.vocab:\n print('not in w2v dict', w)\n\n # compute label vec\n label_sum = np.sum([w2v[w] for w in label], axis = 0)\n label_vec.append(label_sum)\n # store class names => index\n class_dict[' '.join(label)] = class_id\n class_id = class_id + 1\n #print('=====label vec', label_vec)\n return class_dict, np.asarray(label_vec)", "def inception_v2_classify(inputs,\n num_classes=2,\n is_training=True,\n 
dropout_keep_prob=0.8,\n depth_multiplier=1.0,\n prediction_fn=slim.softmax,\n spatial_squeeze=True,\n reuse=None,\n scope='InceptionV2'):\n if depth_multiplier <= 0:\n raise ValueError('depth_multiplier is not greater than zero.')\n\n # Final pooling and prediction\n with tf.variable_scope(scope, 'InceptionV2', [inputs, num_classes],\n reuse=reuse) as scope:\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training):\n end_points = {}\n\n with tf.variable_scope('Logits'):\n kernel_size = _reduced_kernel_size_for_small_input(inputs, [16, 16])\n net = slim.avg_pool2d(inputs, kernel_size, padding='VALID',\n scope='AvgPool_1a_{}x{}'.format(*kernel_size))\n # 1 x 1 x 1024\n net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')\n logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='Conv2d_1c_1x1')\n if spatial_squeeze:\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n return logits, end_points", "def fit_history(self, epochs, epoch_gen, epoch_disc, l1_lambda):\n self.history.get('epochs', []).append(epochs)\n self.history.get('epoch_gen', []).append(epoch_gen)\n self.history.get('epoch_disc', []).append(epoch_disc)\n self.history.get('l1_lambda', []).append(l1_lambda)", "def on_epoch_end(self, epoch, logs={}):\n self.losses.append(logs.get(\"loss\"))\n self.val_losses.append(logs.get(\"val_loss\"))", "def decode(self, output_dict, idx_2_rel_type):\n decoded_predictions = []\n\n predicted_relations = output_dict[\"predicted_relations\"]\n # Size: batch_size x padded_document_length x padded_document_length x n_classes\n # predicted_relations[l, i, j, k] == 1 iif we predict a relation k with ARG1==i, ARG2==j in the l-th sentence of the batch\n\n for instance_tags in predicted_relations:\n sentence_length = instance_tags.size(0)\n decoded_relations = []\n\n for arg1, arg2, rel_type_idx in instance_tags.nonzero().data:\n relation = [\"*\"] * sentence_length\n rel_type = idx_2_rel_type[rel_type_idx.item()]\n relation[arg1] = \"ARG1_\" + rel_type\n relation[arg2] = \"ARG2_\" + rel_type\n decoded_relations.append(relation)\n\n decoded_predictions.append(decoded_relations)\n\n output_dict[\"decoded_predictions\"] = decoded_predictions\n\n return output_dict", "def end_epoch(self, metrics, curves):\n raise NotImplementedError", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'curv_contour_length_14',\n # 'curv_baseline',\n ]\n }\n exp['data_augmentations'] = [\n [\n 'grayscale',\n 'left_right',\n 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'resize',\n # 'per_image_standardization',\n 'zero_one'\n ]]\n exp['val_augmentations'] = exp['data_augmentations']\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 16\n exp['exp_name'] = 'hgru_bn_pathfinder_14'\n exp['model_name'] = 'hgru'\n # exp['clip_gradients'] = 7.\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 50\n exp['shuffle_val'] = True # Shuffle val data.\n exp['shuffle_train'] = True\n return exp", "def update(self, rxn_probs):\n pass", "def add_experiences(\n self,\n curr_all_info: AllBrainInfo,\n next_all_info: AllBrainInfo,\n take_action_outputs: ActionInfoOutputs,\n ) -> None:\n self.trainer_metrics.start_experience_collection_timer()\n if take_action_outputs:\n 
self.stats[\"Policy/Entropy\"].append(take_action_outputs[\"entropy\"].mean())\n self.stats[\"Policy/Learning Rate\"].append(\n take_action_outputs[\"learning_rate\"]\n )\n for name, signal in self.policy.reward_signals.items():\n self.stats[signal.value_name].append(\n np.mean(take_action_outputs[\"value\"][name])\n )\n\n curr_info = curr_all_info[self.brain_name]\n next_info = next_all_info[self.brain_name]\n\n for agent_id in curr_info.agents:\n self.training_buffer[agent_id].last_brain_info = curr_info\n self.training_buffer[\n agent_id\n ].last_take_action_outputs = take_action_outputs\n\n if curr_info.agents != next_info.agents:\n curr_to_use = self.construct_curr_info(next_info)\n else:\n curr_to_use = curr_info\n\n tmp_rewards_dict = {}\n for name, signal in self.policy.reward_signals.items():\n tmp_rewards_dict[name] = signal.evaluate(curr_to_use, next_info)\n\n for agent_id in next_info.agents:\n stored_info = self.training_buffer[agent_id].last_brain_info\n stored_take_action_outputs = self.training_buffer[\n agent_id\n ].last_take_action_outputs\n if stored_info is not None:\n idx = stored_info.agents.index(agent_id)\n next_idx = next_info.agents.index(agent_id)\n if not stored_info.local_done[idx]:\n for i, _ in enumerate(stored_info.visual_observations):\n self.training_buffer[agent_id][\"visual_obs%d\" % i].append(\n stored_info.visual_observations[i][idx]\n )\n self.training_buffer[agent_id][\"next_visual_obs%d\" % i].append(\n next_info.visual_observations[i][next_idx]\n )\n if self.policy.use_vec_obs:\n self.training_buffer[agent_id][\"vector_obs\"].append(\n stored_info.vector_observations[idx]\n )\n self.training_buffer[agent_id][\"next_vector_in\"].append(\n next_info.vector_observations[next_idx]\n )\n if self.policy.use_recurrent:\n if stored_info.memories.shape[1] == 0:\n stored_info.memories = np.zeros(\n (len(stored_info.agents), self.policy.m_size)\n )\n self.training_buffer[agent_id][\"memory\"].append(\n stored_info.memories[idx]\n )\n actions = stored_take_action_outputs[\"action\"]\n if self.policy.use_continuous_act:\n actions_pre = stored_take_action_outputs[\"pre_action\"]\n self.training_buffer[agent_id][\"actions_pre\"].append(\n actions_pre[idx]\n )\n epsilons = stored_take_action_outputs[\"random_normal_epsilon\"]\n self.training_buffer[agent_id][\"random_normal_epsilon\"].append(\n epsilons[idx]\n )\n else:\n self.training_buffer[agent_id][\"action_mask\"].append(\n stored_info.action_masks[idx], padding_value=1\n )\n a_dist = stored_take_action_outputs[\"log_probs\"]\n # value is a dictionary from name of reward to value estimate of the value head\n value = stored_take_action_outputs[\"value\"]\n self.training_buffer[agent_id][\"actions\"].append(actions[idx])\n self.training_buffer[agent_id][\"prev_action\"].append(\n stored_info.previous_vector_actions[idx]\n )\n self.training_buffer[agent_id][\"masks\"].append(1.0)\n self.training_buffer[agent_id][\"done\"].append(\n next_info.local_done[next_idx]\n )\n\n for name, reward_result in tmp_rewards_dict.items():\n # 0 because we use the scaled reward to train the agent\n self.training_buffer[agent_id][\n \"{}_rewards\".format(name)\n ].append(reward_result.scaled_reward[next_idx])\n self.training_buffer[agent_id][\n \"{}_value_estimates\".format(name)\n ].append(value[name][idx][0])\n\n self.training_buffer[agent_id][\"action_probs\"].append(a_dist[idx])\n\n for name, rewards in self.collected_rewards.items():\n if agent_id not in rewards:\n rewards[agent_id] = 0\n if name == \"environment\":\n # 
Report the reward from the environment\n rewards[agent_id] += np.array(next_info.rewards)[next_idx]\n else:\n # Report the reward signals\n rewards[agent_id] += tmp_rewards_dict[name].scaled_reward[\n next_idx\n ]\n\n if not next_info.local_done[next_idx]:\n if agent_id not in self.episode_steps:\n self.episode_steps[agent_id] = 0\n self.episode_steps[agent_id] += 1\n self.trainer_metrics.end_experience_collection_timer()", "def resnet_v2(input_shape, depth, num_classes=7):\n if (depth - 2) % 9 != 0:\n raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')\n \n num_filters_in = 16\n num_res_blocks = int((depth - 2) / 9)\n \n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs,\n num_filters=num_filters_in,\n conv_first=True)\n \n for stage in range(3):\n for res_block in range(num_res_blocks):\n activation = 'relu'\n batch_normalization = True\n strides = 1\n # num of param setting \n if stage == 0: # first stage\n num_filters_out = num_filters_in * 4\n if res_block == 0: # first layer & first stage\n activation = None\n batch_normalization = False\n else: # second, third stage\n num_filters_out = num_filters_in * 2\n if res_block == 0: # first layer but no first stage\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters_in,\n kernel_size=1,\n strides=strides,\n activation=activation,\n batch_normalization=batch_normalization,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_in,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_out,\n kernel_size=1,\n conv_first=False)\n if res_block == 0:\n # Linear projection residual shortcut connection to match\n # changed dims\n # at the first time, make a shortcut origin\n x = resnet_layer(inputs=x,\n num_filters=num_filters_out,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n # and add every reputation\n x = keras.layers.add([x, y])\n \n num_filters_in = num_filters_out\n \n # Add classifier on top\n # v2 has BN_ReLU before Pooling\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n \n # Instantiate model\n model = Model(inputs=inputs, outputs=outputs)\n \n return model", "def generate_dictionary(self, sess, dict_type=\"S2T\"):\n avg1, avg2 = self.calc_avg_dist(sess)\n s2t_dico = self.get_candidates(sess, avg1, avg2)\n print(\"Completed generating S2T dictionary of size \" + str(len(s2t_dico)))\n if dict_type == \"S2T\":\n map_src_ind = np.asarray([s2t_dico[x][0] for x in range(len(s2t_dico))])\n tra_tgt_ind = np.asarray([s2t_dico[x][1] for x in range(len(s2t_dico))])\n return [map_src_ind, tra_tgt_ind]\n if dict_type == \"S2T&T2S\":\n # This case we are running Target 2 Source mappings\n t2s_dico = self.get_candidates(sess, avg2, avg1, swap_score=True)\n print(\"Completed generating T2S dictionary of size \" + str(len(t2s_dico)))\n t2s_dico = np.concatenate([t2s_dico[:, 1:], t2s_dico[:, :1]], 1)\n # Find the common pairs between S2T and T2S\n s2t_candi = set([(a, b) for a, b in s2t_dico])\n t2s_candi = set([(a, b) for a, b in t2s_dico])\n final_pairs = s2t_candi & t2s_candi\n dico = np.asarray(list([[a, b] for (a, b) in final_pairs]))\n print(\"Completed generating final dictionary of size \" + str(len(final_pairs)))\n return dico", "def _extend_network_dict_provider(self, context, network, bindings=None):\n if 'id' not in network:\n return\n if not 
bindings:\n bindings = nsx_db.get_network_bindings(context.session,\n network['id'])\n\n # With NSX plugin, \"normal\" overlay networks will have no binding\n if bindings:\n # Network came in through provider networks API\n network[pnet.NETWORK_TYPE] = bindings[0].binding_type\n network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid\n network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id", "def post_processing(sample_dict: NDict, task1: bool = True, task2: bool = True) -> dict:\n # verify sample\n expected_keys = [f\"target.{key}\" for key in EXPECTED_TARGET_KEYS]\n if task1:\n expected_keys += [f\"task1_pred.{key}\" for key in EXPECTED_TASK1_PRED_KEYS]\n if task2:\n expected_keys += [f\"task2_pred.{key}\" for key in EXPECTED_TASK2_PRED_KEYS]\n set(expected_keys).issubset(set(sample_dict.keypaths()))\n\n # convert scores to numpy array\n # task 1\n if task1:\n task1_pred = []\n for cls_name in TASK1_CLASS_NAMES:\n task1_pred.append(sample_dict[f\"task1_pred.{cls_name}-score\"])\n task1_pred_array = np.array(task1_pred)\n if not np.isclose(task1_pred_array.sum(), 1.0, rtol=0.05):\n print(\n f\"Warning: expecting task 1 prediction for case {sample_dict['descriptor']} to sum up to almost 1.0, got {task1_pred_array}\"\n )\n sample_dict[\"task1_pred.array\"] = task1_pred_array\n\n # task 2\n if task2:\n task2_pred = []\n for cls_name in TASK2_CLASS_NAMES:\n task2_pred.append(sample_dict[f\"task2_pred.{cls_name}-score\"])\n task2_pred_array = np.array(task2_pred)\n if not np.isclose(task2_pred_array.sum(), 1.0, rtol=0.05):\n print(\n f\"Error: expecting task 2 prediction for case {sample_dict['descriptor']} to sum up to almost 1.0, got {task2_pred_array}\"\n )\n sample_dict[\"task2_pred.array\"] = task2_pred_array\n\n return sample_dict", "def mobilenet_v2_base(inputs,\n final_endpoint='conv2d_8',\n output_stride=None,\n min_depth=8,\n depth_multiplier=1.0,\n scope=None):\n end_points = {}\n\n conv_defs = _CONV_DEFS\n\n #if output_stride is not None and output_stride not in [8, 16, 32]:\n # raise ValueError('Only allowed output_stride values are 8, 16, 32.')\n\n bottleneck_id=0\n with tf.variable_scope(scope, default_name='MobilenetV2',values=[inputs]):\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME'):\n net = inputs\n for i, conv_def in enumerate(conv_defs):\n\n layer_stride = conv_def.stride\n layer_rate = 1\n\n if isinstance(conv_def, Conv):\n end_point= 'conv2d_%d' % i\n net = slim.conv2d(net, conv_def.depth, conv_def.kernel,\n stride=conv_def.stride,\n normalizer_fn=slim.batch_norm,\n scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint:\n return net, end_points\n\n elif isinstance(conv_def, InvResConv):\n for i in conv_def.repeat:\n end_point = 'bottleneck_%d' % bottleneck_id\n bottleneck_id+=1\n if i == 0 :\n net = bottleneck(net, conv_def.expansion, conv_def.depth,\n conv_def.stride, scope=end_point)\n else:\n net = bottleneck(net, conv_def.expansion, conv_def.depth,\n 1, scope=end_point)\n\n end_points[end_point]=net\n if end_point == final_endpoint:\n return net, end_points\n else:\n raise ValueError('Unknown convolution type %s for layer %d'\n % (conv_def.ltype, i))\n\n\n raise ValueError('Unknown final endpoint %s' % final_endpoint)", "def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n tensorboard_logs = {\"val_loss\": avg_loss}\n return {\"avg_val_loss\": avg_loss, \"log\": tensorboard_logs}", "def validation_epoch_end(self, outputs):\n avg_loss = 
torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n tensorboard_logs = {\"val_loss\": avg_loss}\n return {\"avg_val_loss\": avg_loss, \"log\": tensorboard_logs}", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())", "def get_ytm_dict(self):\n ytm=self.ytm\n for term in self.Rmn.keys():\n ytm = Bootstrapping.bisection(self,0.001, 0.1, 1e-10, 2 * term, self.Rmn, ytm)\n return ytm", "def add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(num_fc_neurons, activation='relu')(x)\n x = Dropout(dropout)(x)\n x = Dense(num_fc_neurons, activation='relu')(x)\n x = Dropout(dropout)(x)\n predictions = Dense(nb_classes, activation='softmax')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n return model", "def resnet10_2D(num_classes, in_channels, ngf, last_sigmoid=False):\n model = ResNet2D(BasicBlock2D, [1, 1, 1, 1], num_classes, in_channels, ngf=ngf, last_sigmoid=last_sigmoid)\n return model", "def omniglot_resnet_v2(resnet_size, num_classes, data_format = None):\n\n\n model_params = {\n 18: {'blcok': building_block, 'layers': [2,2,2,2]},\n 34: {'block': building_block, 'layers': [3,4,6,3]},\n 50: {'block': bottleneck_block, 'layers': [3,4,6,3]},\n 101: {'block': bottleneck_block, 'layers': [3,4,6,3]},\n 152: {'blcok': bottleneck_block, 'layers': [3,8,36,3]}\n }\n\n if resnet_size not in model_params:\n raise ValueError('not a valid resnet_size:', resnet_size)\n\n\n params = model_params[resnet_size]\n return omniglot_resnet_v2_generator(params['blcok'], params['layers'], num_classes, data_format)", "def add_new_last_layer(base_model, nb_classes):\r\n x = base_model.output\r\n x = AveragePooling2D((8, 8), border_mode='valid', name='avg_pool')(x)\r\n x = Dropout(0.25)(x)\r\n x = Flatten()(x)\r\n predictions = Dense(7, activation='softmax')(x)\r\n model = Model(input=base_model.input, output=predictions)\r\n return model", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet50_earlyexit(**kwargs):\n model = ResNetEarlyExit(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def optimize_ppo2(trial):\n\treturn {\n\t\t'n_steps': int(trial.suggest_loguniform('n_steps', 64, 2048)),\n\t\t'gamma': trial.suggest_loguniform('gamma', 0.9, 0.9999),\n\t\t'learning_rate': trial.suggest_loguniform('learning_rate', 1e-5, 1e-1),\n\t\t'ent_coef': trial.suggest_loguniform('ent_coef', 1e-8, 1e-1),\n\t\t'cliprange': trial.suggest_uniform('cliprange', 0.1, 0.4),\n\t\t'noptepochs': int(trial.suggest_loguniform('noptepochs', 1, 48)),\n\t\t'lam': trial.suggest_uniform('lam', 0.8, 1.)\n\t}", "def assign_ppn_candidates(data_dict, result_dict):\n \n result = {}\n for key, val in result_dict.items():\n result[key] = [val]\n \n ppn = uresnet_ppn_type_point_selector(result['input_rescaled'][0],\n result, entry=0, \n apply_deghosting=False)\n \n ppn_voxels = ppn[:, 1:4]\n ppn_score = ppn[:, 5]\n ppn_type = ppn[:, 12]\n if 'ppn_classify_endpoints' in result:\n ppn_endpoint = ppn[:, 13:]\n 
assert ppn_endpoint.shape[1] == 2\n\n ppn_candidates = []\n for i, pred_point in enumerate(ppn_voxels):\n pred_point_type, pred_point_score = ppn_type[i], ppn_score[i]\n x, y, z = ppn_voxels[i][0], ppn_voxels[i][1], ppn_voxels[i][2]\n if 'ppn_classify_endpoints' in result:\n ppn_candidates.append(np.array([x, y, z, \n pred_point_score, \n pred_point_type, \n ppn_endpoint[i][0],\n ppn_endpoint[i][1]]))\n else:\n ppn_candidates.append(np.array([x, y, z, \n pred_point_score, \n pred_point_type]))\n\n if len(ppn_candidates):\n ppn_candidates = np.vstack(ppn_candidates)\n else:\n enable_classify_endpoints = 'ppn_classify_endpoints' in result\n ppn_candidates = np.empty((0, 5 if not enable_classify_endpoints else 7), \n dtype=np.float32)\n \n # match_points_to_particles(ppn_candidates, result_dict['particles'])\n \n return {}", "def finalize_output_dict(self):\n self.output_dict = {\n key: torch.cat(value).numpy() for key, value in self.output_dict.items()\n }", "def default_hparams():\n return {\n 'initializer': None,\n 'num_heads': 8,\n 'output_dim': 512,\n 'num_units': 512,\n 'dropout_rate': 0.1,\n 'use_bias': False,\n 'name': 'multihead_attention_rpr',\n 'is_decoder': False,\n 'relative_attention_num_buckets': 32\n }", "def resnext50(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def create_net(num_classes=1001, sample_shape=(3, 299, 299), is_training=True,\n dropout_keep_prob=0.8, final_endpoint='InceptionV4/Mixed_7d',\n aux_endpoint='InceptionV4/Mixed_6e'):\n end_points = {}\n name = 'InceptionV4'\n net, end_points = inception_v4_base(sample_shape,\n final_endpoint=final_endpoint,\n aux_endpoint=aux_endpoint)\n # Auxiliary Head logits\n if aux_endpoint is not None:\n # 17 x 17 x 1024\n aux_logits = end_points[aux_endpoint + '-aux']\n blk = name + '/AuxLogits'\n net.add(AvgPooling2D('%s/AvgPool_1a_5x5' % blk, 5, stride=3,\n border_mode='VALID'), aux_logits)\n t = conv2d(net, '%s/Conv2d_1b_1x1' % blk, 128, 1)\n conv2d(net, '%s/Conv2d_2a' % blk, 768,\n t.get_output_sample_shape()[1:3], border_mode='VALID')\n net.add(Flatten('%s/flat' % blk))\n end_points[blk] = net.add(Dense('%s/Aux_logits' % blk, num_classes))\n\n # Final pooling and prediction\n # 8 x 8 x 1536\n blk = name + '/Logits'\n last_layer = end_points[final_endpoint]\n net.add(AvgPooling2D('%s/AvgPool_1a' % blk,\n last_layer.get_output_sample_shape()[1:3],\n border_mode='VALID'),\n last_layer)\n # 1 x 1 x 1536\n net.add(Dropout('%s/Dropout_1b' % blk, 1 - dropout_keep_prob))\n net.add(Flatten('%s/PreLogitsFlatten' % blk))\n # 1536\n end_points[blk] = net.add(Dense('%s/Logits' % blk, num_classes))\n return net, end_points", "def add_new_last_layer(base_model, nb_classes):\r\n x = base_model.output\r\n x = GlobalAveragePooling2D()(x)\r\n x = Dense(fc_size, activation='relu')(x) #new FC layer, random init\r\n predictions = Dense(nb_classes, activation='softmax')(x) #new softmax layer\r\n model = Model(inputs=base_model.input, outputs=predictions)\r\n return model", "def resnet18_2D(num_classes, in_channels, ngf, last_sigmoid=False):\n model = ResNet2D(BasicBlock2D, [2, 2, 2, 2], num_classes, in_channels, ngf=ngf, last_sigmoid=last_sigmoid)\n return model", "def probOut(self) -> dict:\n \n return {\n j: sum(\n [ self.mat[i][j] * self.probIn[i] for i in self.simbIn ]\n ) for j in self.simbOut\n }", "def TEM_loss(anchors_action,anchors_startend,\n Y_action,Y_startend,config): \n loss_action,num_sample_action = binary_logistic_loss(Y_action,anchors_action)\n loss_startend,num_sample_startend = 
binary_logistic_loss(Y_startend,anchors_startend)\n # print('loss_action', loss_action)\n # print('loss_startend', loss_startend)\n loss={\"loss_action\":loss_action,\"num_sample_action\":num_sample_action,\n \"loss_startend\":loss_startend,\"num_sample_startend\":num_sample_startend}\n return loss", "def apply_edges(self, edges):\n h = torch.cat([edges.src['h'], edges.dst['h']], 1)\n out_score = self.W2(F.relu(self.W1(h))).squeeze(1)\n out_label = torch.round(torch.sigmoid(out_score))\n # print(out_score, out_label)\n out_dict = {'score': out_score, 'label': out_label}\n return out_dict", "def predict_proba(self):\n ...", "def modified_resnet10(self) -> torch.nn.Module:\n # initialize a Resnet-10 instance\n net = torchvision.models.resnet._resnet(arch=\"resnet10\", block=torchvision.models.resnet.BasicBlock, layers=[1, 1, 1, 1], pretrained=False, progress=False)\n\n # the first layer will be a lazy convolutional layer with any input channels\n net.conv1 = torch.nn.LazyConv2d(\n out_channels=64,\n kernel_size=(7, 7),\n stride=(2, 2),\n padding=(3, 3),\n bias=not self.bn_affine\n )\n\n # modify batch-norm layer to have momentum 1 and no tracking statistics\n net.bn1 = torch.nn.BatchNorm2d(64, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer1[0].bn1 = torch.nn.BatchNorm2d(64, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer1[0].bn2 = torch.nn.BatchNorm2d(64, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer2[0].bn1 = torch.nn.BatchNorm2d(128, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer2[0].bn2 = torch.nn.BatchNorm2d(128, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer2[0].downsample[1] = torch.nn.BatchNorm2d(128, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer3[0].bn1 = torch.nn.BatchNorm2d(256, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer3[0].bn2 = torch.nn.BatchNorm2d(256, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer3[0].downsample[1] = torch.nn.BatchNorm2d(256, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer4[0].bn1 = torch.nn.BatchNorm2d(512, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer4[0].bn2 = torch.nn.BatchNorm2d(512, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer4[0].downsample[1] = torch.nn.BatchNorm2d(512, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n # last layer\n if self.dim_output is not None:\n net.fc = torch.nn.LazyLinear(out_features=self.dim_output)\n else:\n net.fc = torch.nn.Identity()\n\n # add dropout-2d after layers 1, 2, and 3\n net.maxpool.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer1[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer1[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer1.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer2[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer2[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer2.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer3[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer3[1].add_module(name='dropout2d', 
module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer3.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer4[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer4[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer4.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n return net", "def fit_config(rnd: int) -> Dict[str, str]:\n config = {\n \"epoch_global\": str(rnd),\n \"epochs\": str(1),\n \"batch_size\": str(32),\n \"learning_rate\": str(0.001),\n }\n return config", "def train_loop_end(self):\r\n result = {}\r\n for task_name, loss in self.training_losses.items():\r\n result[task_name] = {loss.name: loss.result()}\r\n for task_name, task_metrics in self.training_metrics.items():\r\n result[task_name].update(\r\n {metric.name: metric.result() for metric in task_metrics})\r\n # Note that, the learning rate schedule is managed by the keras optimizer\r\n # internally, which respects the number of backward pass as `iterations`.\r\n # The learning rate schedule does not follow the trainer logical global\r\n # step of multiple tasks.\r\n if callable(self.optimizer.learning_rate):\r\n result[\"learning_rate\"] = self.optimizer.learning_rate(\r\n self.optimizer.iterations)\r\n else:\r\n result[\"learning_rate\"] = self.optimizer.learning_rate\r\n return result", "def create_feed_dict(self, inputs_batch, mask_batch, length_batch ,labels_batch=None,dropout=1):\n ### YOUR CODE (~6-10 lines)\n feed_dict = {}\n if labels_batch is not None:\n feed_dict[self.labels_placeholder] = labels_batch\n if inputs_batch is not None:\n feed_dict[self.input_placeholder] = inputs_batch\n if length_batch is not None:\n feed_dict[self.length_placeholder] = length_batch\n if dropout is not None:\n feed_dict[self.dropout_placeholder] = dropout\n if mask_batch is not None:\n feed_dict[self.mask_placeholder] = mask_batch\n ### END YOUR CODE\n return feed_dict", "def on_hparams_found(self, values: tuple, lr_min: float, lr_max: float,\r\n batch_size: int) -> None:\r\n pass", "def mid2d(self, endroit2D: Endroit2D) -> dict:\n\n return {\"point2D\": self.p2ddict.mid2d(endroit2D.p2ddict).getDict()}", "def inception_v4_base(sample_shape, final_endpoint='Inception/Mixed_7d',\n aux_endpoint='Inception/Mixed_6e'):\n name = 'InceptionV4'\n end_points = {}\n net = ffnet.FeedForwardNet()\n\n def final_aux_check(block_name):\n if block_name == final_endpoint:\n return True\n if block_name == aux_endpoint:\n aux = aux_endpoint + '-aux'\n end_points[aux] = net.add(Split(aux, 2))\n return False\n\n # 299 x 299 x 3\n blk = name + '/Conv2d_1a_3x3'\n net.add(Conv2D(blk, 32, 3, 2, border_mode='VALID', use_bias=False,\n input_sample_shape=sample_shape))\n net.add(BatchNormalization('%s/BatchNorm' % blk))\n end_points[blk] = net.add(Activation('%s/relu' % blk))\n if final_aux_check(blk):\n return net, end_points\n\n # 149 x 149 x 32\n blk = name + '/Conv2d_2a_3x3'\n end_points[blk] = conv2d(net, blk, 32, 3, border_mode='VALID')\n if final_aux_check(blk):\n return net, end_points\n\n # 147 x 147 x 32\n blk = name + '/Conv2d_2b_3x3'\n end_points[blk] = conv2d(net, blk, 64, 3)\n if final_aux_check(blk):\n return net, end_points\n\n # 147 x 147 x 64\n blk = name + '/Mixed_3a'\n s = net.add(Split('%s/Split' % blk, 2))\n br0 = net.add(MaxPooling2D('%s/Branch_0/MaxPool_0a_3x3' % blk, 3, 2,\n border_mode='VALID'), s)\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_3x3' % blk, 96, 3, 2,\n 
border_mode='VALID', src=s)\n end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1])\n if final_aux_check(blk):\n return net, end_points\n\n # 73 x 73 x 160\n blk = name + '/Mixed_4a'\n s = net.add(Split('%s/Split' % blk, 2))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 64, 1, src=s)\n br0 = conv2d(net, '%s/Branch_0/Conv2d_1a_3x3' % blk, 96, 3,\n border_mode='VALID')\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 64, 1, src=s)\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x7' % blk, 64, (1, 7))\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0c_7x1' % blk, 64, (7, 1))\n br1 = conv2d(net, '%s/Branch_1/Conv2d_1a_3x3' % blk, 96, 3,\n border_mode='VALID')\n end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1])\n if final_aux_check(blk):\n return net, end_points\n\n # 71 x 71 x 192\n blk = name + '/Mixed_5a'\n s = net.add(Split('%s/Split' % blk, 2))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_1a_3x3' % blk, 192, 3, 2,\n border_mode='VALID', src=s)\n br1 = net.add(MaxPooling2D('%s/Branch_1/MaxPool_1a_3x3' % blk, 3, 2,\n border_mode='VALID'), s)\n end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1])\n if final_aux_check(blk):\n return net, end_points\n\n # 35 x 35 x 384\n # 4 x Inception-A blocks\n for idx in range(4):\n blk = name + '/Mixed_5' + chr(ord('b') + idx)\n end_points[blk] = block_inception_a(blk, net)\n if final_aux_check(blk):\n return net, end_points\n\n # 35 x 35 x 384\n # Reduction-A block\n blk = name + '/Mixed_6a'\n end_points[blk] = block_reduction_a(blk, net)\n if final_aux_check(blk):\n return net, end_points[blk], end_points\n\n # 17 x 17 x 1024\n # 7 x Inception-B blocks\n for idx in range(7):\n blk = name + '/Mixed_6' + chr(ord('b') + idx)\n end_points[blk] = block_inception_b(blk, net)\n if final_aux_check(blk):\n return net, end_points\n\n # 17 x 17 x 1024\n # Reduction-B block\n blk = name + '/Mixed_7a'\n end_points[blk] = block_reduction_b(blk, net)\n if final_aux_check(blk):\n return net, end_points\n\n # 8 x 8 x 1536\n # 3 x Inception-C blocks\n for idx in range(3):\n blk = name + '/Mixed_7' + chr(ord('b') + idx)\n end_points[blk] = block_inception_c(blk, net)\n if final_aux_check(blk):\n return net, end_points\n\n assert final_endpoint == blk, \\\n 'final_enpoint = %s is not in the net' % final_endpoint", "def rpn_layer(base_layer, num_anchors):\n x = Conv2D(256, (3, 3), padding=\"same\", activation=\"relu\", kernel_initializer=\"normal\",\n name=\"rpn_conv1\")(base_layer)\n\n class_output = Conv2D(num_anchors, (1, 1), activation=\"sigmoid\", kernel_initializer=\"uniform\",\n name=\"rpn_class_output\")(x)\n regr_output = Conv2D(4 * num_anchors, (1, 1), activation=\"linear\", kernel_initializer=\"zero\",\n name=\"rpn_regr_output\")(x)\n\n return class_output, regr_output, base_layer", "def _get_net_and_params(self, xgraph: XGraph, last_layers: List[str]):\n # TODO Remove hardcoding parameter retrieval \n\n net = []\n params = {}\n last_layer_cnt = 1\n last_layer_tops = set([])\n\n for X in xgraph.get_layers():\n\n if X.name in last_layer_tops:\n last_layer_tops = last_layer_tops.union(tuple(X.tops))\n continue\n\n if 'Convolution' in X.type or 'Conv2DTranspose' in X.type:\n if not isinstance(X.data, xlayer.ConvData):\n raise ValueError(\n \"Invalid convolution data type: {}, should be \"\n \" xlayer.ConvData\".format(type(X.data)))\n # OIHW\n params[X.name + '_kernel'] = X.data.weights\n params[X.name + '_biases'] = X.data.biases\n elif 'Dense' in X.type:\n if not isinstance(X.data, xlayer.ConvData):\n raise ValueError(\n 
\"Invalid inner product data type: {}, should be \"\n \" xlayer.ConvData\".format(type(X.data)))\n # OIHW\n params[X.name + '_weights'] = X.data.weights\n params[X.name + '_biases'] = X.data.biases\n elif 'BatchNorm' in X.type:\n if not isinstance(X.data, xlayer.BatchData):\n raise ValueError(\n \"Invalid batchnorm data type: {}, should be\"\n \" xlayer.BatchData\".format(type(X.data)))\n # channels\n params[X.name + '_mu'] = X.data.mu\n params[X.name + '_variance'] = X.data.sigma_square\n params[X.name + '_gamma'] = X.data.gamma\n params[X.name + '_beta'] = X.data.beta\n elif 'Scale' in X.type:\n if not isinstance(X.data, xlayer.ScaleData):\n raise ValueError(\n \"Invalid scale data type: {}, should be\"\n \" xlayer.ScaleData\".format(type(X.data)))\n # channels\n params[X.name + '_gamma'] = X.data.gamma\n params[X.name + '_beta'] = X.data.beta\n elif 'BiasAdd' in X.type:\n assert X.data is not None\n params[X.name + '_bias'] = X.data[0]\n elif 'Eltwise' in X.type:\n if X.data != []:\n params[X.name + '_beta'] = X.data[0]\n\n net.append(X)\n\n if last_layers is not None and X.name in last_layers:\n if last_layer_cnt == len(last_layers):\n break\n else:\n last_layer_cnt += 1\n last_layer_tops = last_layer_tops.union(tuple(X.tops))\n\n return net, params", "def on_predict_batch_end(\n self, batch: int, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None\n ):\n pass", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def training_epoch_end(self, outputs: Any) -> None:\n self.log_dict(self.train_metrics.compute())\n self.train_metrics.reset()", "def inception_resnet_v1_mini(inputs,\r\n is_training,\r\n dropout_keep_prob=0.8,\r\n bottleneck_layer_size=128,\r\n reuse=None,\r\n scope='InceptionResnetV1'):\r\n 
end_points = {}\r\n\r\n with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):\r\n with slim.arg_scope([slim.batch_norm, slim.dropout],\r\n is_training=is_training):\r\n with slim.arg_scope(\r\n [slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\r\n stride=1,\r\n padding='SAME'):\r\n\r\n # stem\r\n net = stem(inputs)\r\n end_points['stem_out'] = net\r\n\r\n # 1 x Inception-resnet-A\r\n net = slim.repeat(\r\n net,\r\n 2,\r\n inception_resnet_a,\r\n scale=0.17,\r\n scope=\"inception_resnet_a\")\r\n end_points['inception_resnet_a_out'] = net\r\n\r\n # Reduction-A\r\n with tf.variable_scope('reduction_a'):\r\n net = reduction_a(net, 192, 192, 256, 384)\r\n end_points['reduction_a_out'] = net\r\n\r\n # 2 x Inception-Resnet-B\r\n net = slim.repeat(\r\n net,\r\n 4,\r\n inception_resnet_b,\r\n scale=0.10,\r\n scope=\"inception_resnet_b\")\r\n end_points['inception_resnet_b_out'] = net\r\n\r\n # Reduction-B\r\n with tf.variable_scope('reduction_b'):\r\n net = reduction_b(net)\r\n end_points['reduction_b_out'] = net\r\n\r\n # 1 x Inception-Resnet-C\r\n net = slim.repeat(\r\n net,\r\n 2,\r\n inception_resnet_c,\r\n scale=0.20,\r\n scope=\"inception_resnet_c\")\r\n end_points['inception_resnet_c_out'] = net\r\n\r\n # Average Pooling层,输出为8×8×1792\r\n net = slim.avg_pool2d(\r\n net,\r\n net.get_shape()[1:3],\r\n padding='VALID',\r\n scope='avgpool_8x8')\r\n\r\n # 扁平除了batch_size维度的其它维度。使输出变为:[batch_size, ...]\r\n net = slim.flatten(net)\r\n\r\n # dropout层\r\n # net = slim.dropout(\r\n # net, dropout_keep_prob, is_training=False, scope='Dropout')\r\n # end_points['PreLogitsFlatten'] = net\r\n\r\n\r\n net = slim.fully_connected(\r\n net,\r\n bottleneck_layer_size,\r\n activation_fn=None,\r\n scope='logits',\r\n reuse=False)\r\n\r\n return net", "def add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(FC_SIZE, activation='relu')(x) #new FC layer, random init\n\n predictions = Dense(nb_classes, activation='softmax')(x) #new softmax layer\n\n model = Model(inputs=base_model.input, outputs=predictions)\n\n return model", "def feed_dict(training_data=True):\n if training_data:\n xs, ys = mnist.train.next_batch(batch_size)\n kp = keep_prob\n else:\n xs, ys = mnist.validation.images, mnist.validation.labels\n kp = 1.0\n return {x: xs, t: ys, k: kp}", "def _postprocess_keypoints_multi_class(self, prediction_dict, classes,\n y_indices, x_indices, boxes,\n num_detections):\n total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict\n in self._kp_params_dict.values())\n batch_size, max_detections = _get_shape(classes, 2)\n kpt_coords_for_example_list = []\n kpt_scores_for_example_list = []\n for ex_ind in range(batch_size):\n # The tensors that host the keypoint coordinates and scores for all\n # instances and all keypoints. 
They will be updated by scatter_nd_add for\n # each keypoint tasks.\n kpt_coords_for_example_all_det = tf.zeros(\n [max_detections, total_num_keypoints, 2])\n kpt_scores_for_example_all_det = tf.zeros(\n [max_detections, total_num_keypoints])\n for task_name, kp_params in self._kp_params_dict.items():\n keypoint_heatmap = prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]\n keypoint_offsets = prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]\n keypoint_regression = prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]\n instance_inds = self._get_instance_indices(\n classes, num_detections, ex_ind, kp_params.class_id)\n\n # Gather the feature map locations corresponding to the object class.\n y_indices_for_kpt_class = tf.gather(y_indices, instance_inds, axis=1)\n x_indices_for_kpt_class = tf.gather(x_indices, instance_inds, axis=1)\n if boxes is None:\n boxes_for_kpt_class = None\n else:\n boxes_for_kpt_class = tf.gather(boxes, instance_inds, axis=1)\n\n # Postprocess keypoints and scores for class and single image. Shapes\n # are [1, num_instances_i, num_keypoints_i, 2] and\n # [1, num_instances_i, num_keypoints_i], respectively. Note that\n # num_instances_i and num_keypoints_i refers to the number of\n # instances and keypoints for class i, respectively.\n (kpt_coords_for_class, kpt_scores_for_class, _) = (\n self._postprocess_keypoints_for_class_and_image(\n keypoint_heatmap,\n keypoint_offsets,\n keypoint_regression,\n classes,\n y_indices_for_kpt_class,\n x_indices_for_kpt_class,\n boxes_for_kpt_class,\n ex_ind,\n kp_params,\n ))\n\n # Prepare the indices for scatter_nd. The resulting combined_inds has\n # the shape of [num_instances_i * num_keypoints_i, 2], where the first\n # column corresponds to the instance IDs and the second column\n # corresponds to the keypoint IDs.\n kpt_inds = tf.constant(kp_params.keypoint_indices, dtype=tf.int32)\n kpt_inds = tf.expand_dims(kpt_inds, axis=0)\n instance_inds_expand = tf.expand_dims(instance_inds, axis=-1)\n kpt_inds_expand = kpt_inds * tf.ones_like(instance_inds_expand)\n instance_inds_expand = instance_inds_expand * tf.ones_like(kpt_inds)\n combined_inds = tf.stack(\n [instance_inds_expand, kpt_inds_expand], axis=2)\n combined_inds = tf.reshape(combined_inds, [-1, 2])\n\n # Reshape the keypoint coordinates/scores to [num_instances_i *\n # num_keypoints_i, 2]/[num_instances_i * num_keypoints_i] to be used\n # by scatter_nd_add.\n kpt_coords_for_class = tf.reshape(kpt_coords_for_class, [-1, 2])\n kpt_scores_for_class = tf.reshape(kpt_scores_for_class, [-1])\n kpt_coords_for_example_all_det = tf.tensor_scatter_nd_add(\n kpt_coords_for_example_all_det,\n combined_inds, kpt_coords_for_class)\n kpt_scores_for_example_all_det = tf.tensor_scatter_nd_add(\n kpt_scores_for_example_all_det,\n combined_inds, kpt_scores_for_class)\n\n kpt_coords_for_example_list.append(\n tf.expand_dims(kpt_coords_for_example_all_det, axis=0))\n kpt_scores_for_example_list.append(\n tf.expand_dims(kpt_scores_for_example_all_det, axis=0))\n\n # Concatenate all keypoints and scores from all examples in the batch.\n # Shapes are [batch_size, max_detections, num_total_keypoints, 2] and\n # [batch_size, max_detections, num_total_keypoints], respectively.\n keypoints = tf.concat(kpt_coords_for_example_list, axis=0)\n keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)\n\n return keypoints, keypoint_scores", "def update_probabilities(self):\n self.probabilities = self.pheromones**self.EXP_PH * 
self.mcv**self.EXP_MCV", "def create_baselines(X_train, y_train, X_test, y_test, score_types=['accuracy', 'recall', 'precision', 'f1_score']):\n # establish baseline models\n print(\"Running baseline models...\")\n baselines = {'Weighted Random Guess': WeightedGuess(), 'Guess Most Frequent': MajorityGuess()}\n baseline_scores = {}\n for name in baselines:\n model = baselines[name]\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n scores = model.score(y_test, y_pred, scoring=score_types)\n baseline_scores[name] = scores\n print(\"{} Scores: \".format(name))\n for metric in scores:\n print(\"{} score: {}\".format(metric.capitalize(), round(scores[metric], 5)))\n return baselines, baseline_scores", "def postprocess(self, prediction_dict, **params):\r\n pass", "def _graph_fn_get_action_adapter_logits_parameters_log_probs(self, nn_output, nn_input):\n logits = FlattenedDataOp()\n parameters = FlattenedDataOp()\n log_probs = FlattenedDataOp()\n\n if isinstance(nn_input, dict):\n nn_input = next(iter(nn_input.values()))\n\n for flat_key, action_adapter in self.action_adapters.items():\n out = action_adapter.get_logits_parameters_log_probs(nn_output, nn_input)\n logits[flat_key], parameters[flat_key], log_probs[flat_key] = \\\n out[\"logits\"], out[\"parameters\"], out[\"log_probs\"]\n\n return logits, parameters, log_probs", "def save_parameters(self, session, out_dict=None):\n if out_dict is None:\n out_dict = {}\n for w in self.weights:\n out_dict[w.name.rsplit(':', 1)[0]] = session.run([w])[0]\n return out_dict", "def lfads_params(key, lfads_hps):\n keys = random.split(key, 9)\n\n data_dim = lfads_hps['data_dim']\n ntimesteps = lfads_hps['ntimesteps']\n enc_dim = lfads_hps['enc_dim']\n con_dim = lfads_hps['con_dim']\n ii_dim = lfads_hps['ii_dim']\n gen_dim = lfads_hps['gen_dim']\n factors_dim = lfads_hps['factors_dim']\n batch_size = lfads_hps['batch_size']\n ic_dim = enc_dim # Could make a different HP via a linear layer\n ib_dim = lfads_hps['ib_dim'] # inferred bias is a static input to generator\n z_dim = ic_dim + ib_dim + ntimesteps * ii_dim\n gmm_size = lfads_hps['gmm_size']\n\n ic_enc_params = {'fwd_rnn' : gru_params(keys[0], enc_dim, data_dim),\n 'bwd_rnn' : gru_params(keys[1], enc_dim, data_dim)}\n post_ib_params = affine_params(keys[2], 2*ib_dim, 2*enc_dim) # m, v <- bi \n post_ic_params = affine_params(keys[3], 2*gen_dim, 2*enc_dim) # m, v <- bi\n \n prior_params = gmm_params(keys[4], gmm_size, z_dim)\n con_params = gru_params(keys[5], con_dim, 2*enc_dim + factors_dim + ii_dim)\n con_out_params = affine_params(keys[6], 2*ii_dim, con_dim) #m, v\n gen_params = gru_params(keys[7], gen_dim, ii_dim + ib_dim)\n factors_params = linear_params(keys[8], factors_dim, gen_dim)\n lograte_params = affine_params(keys[9], data_dim, factors_dim)\n\n return {'ic_enc' : ic_enc_params,\n 'post_ib' : post_ib_params,\n 'post_ic' : post_ic_params,\n 'con' : con_params, 'con_out' : con_out_params,\n 'gmm' : prior_params,\n 'gen' : gen_params, 'factors' : factors_params,\n 'f0' : np.zeros((lfads_hps['factors_dim'],)),\n 'ii0' : np.zeros((lfads_hps['ii_dim'],)),\n 'logrates' : lograte_params}", "def _policy_nn(self):\n with tf.variable_scope(\"reward_params\") as scope:\n \n self.h1 = tf.layers.dense(self.input_ph, self.hidden_dim, tf.nn.tanh,\n kernel_initializer=tf.random_normal_initializer(\n stddev=np.sqrt(1 / self.params_dim)), name=\"h1\")\n self.h2 = tf.layers.dense(self.h1, self.hidden_dim, tf.nn.tanh,\n kernel_initializer=tf.random_normal_initializer(\n stddev=np.sqrt(1 / 
self.params_dim)), name=\"h2\")\n self.rewards = tf.layers.dense(self.h2, 1,\n kernel_initializer=tf.random_normal_initializer(\n stddev=np.sqrt(1 / self.hidden_dim)), name=\"rewards\")\n self.rewards_sum = tf.reduce_sum(self.rewards)", "def predict(context, top_n=5, normalize=False):\n with torch.no_grad():\n context = context.unsqueeze(0)\n candidates = fixed_candidates\n if args.cuda:\n context = context.cuda(non_blocking=True)\n ctx, _ = net(context, None)\n scores, index = score_candidates(ctx, cand_embs, top_n, normalize)\n response = []\n outputs = []\n for i, (score, index) in enumerate(zip(scores.squeeze(0), index.squeeze(0)), 1):\n response.append((stringify(candidates[index]), float(score)))\n if index < breakingpt:\n outputs.append(\"EmpChat\")\n elif index < breakingpt2:\n outputs.append(\"DailyDialog\")\n else:\n outputs.append(\"Reddit\")\n return response, outputs", "def final_predictions(x, y, x_tk, y_tk):\n # TODO: Train neural network using model_final\n model = model_final(x.shape,y.shape[1],\n len(x_tk.word_index)+1,\n len(y_tk.word_index)+1)\n model.summary()\n model.fit(x, y, batch_size=1024, epochs=25, validation_split=0.2)\n\n \n ## DON'T EDIT ANYTHING BELOW THIS LINE\n y_id_to_word = {value: key for key, value in y_tk.word_index.items()}\n y_id_to_word[0] = '<PAD>'\n\n sentence = 'he saw a old yellow truck'\n sentence = [x_tk.word_index[word] for word in sentence.split()]\n sentence = pad_sequences([sentence], maxlen=x.shape[-1], padding='post')\n sentences = np.array([sentence[0], x[0]])\n predictions = model.predict(sentences, len(sentences))\n\n print('Sample 1:')\n print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]]))\n print('Il a vu un vieux camion jaune')\n print('Sample 2:')\n print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[1]]))\n print(' '.join([y_id_to_word[np.max(x)] for x in y[0]]))", "def add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(FC_SIZE, activation='relu')(x) # new FC layer, random init\n predictions = Dense(nb_classes, activation='softmax')(x) # new softmax layer\n model = Model(input=base_model.input, output=predictions)\n return model", "def add_loss(self):\n with vs.variable_scope(\"loss\"):\n weights = tf.to_float(tf.not_equal(self.ans_ids, PAD_ID)) # [batch_size, context_len]\n\n # shift the weight right to include the end id\n batch_size = tf.shape(weights)[0]\n shift_val = tf.ones([batch_size, 1])\n\n self.new_ans_ids = tf.concat([self.ans_ids[:, 1:], tf.fill([batch_size, 1], 0)], 1)\n self.logits = self.train_logits\n weights = tf.concat([shift_val, weights], 1)[:, :-1]\n self.loss = tf.contrib.seq2seq.sequence_loss(self.logits, self.new_ans_ids, weights=weights)\n tf.summary.scalar('train_loss', self.loss)\n tf.summary.scalar('sampling_prob', self.sampling_prob)\n\n if self.FLAGS.pred_method == 'beam':\n self.dev_logits = tf.Print(self.dev_logits[:, :, 0], [tf.shape(self.dev_logit), self.dev_logits[0, :, 0]])\n self.dev_loss = tf.cast(self.dev_logits[0, 0], tf.float32)\n return\n dev_logits_len = tf.to_int32(tf.shape(self.dev_logits)[1])\n weights = tf.concat([weights[:, 1:], tf.fill([batch_size, 1], 0.0)], 1)\n self.dev_loss = tf.contrib.seq2seq.sequence_loss(\n self.dev_logits, self.new_ans_ids[:, :dev_logits_len],\n weights=weights[:, :dev_logits_len])", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'cluttered_nist_ix1',\n # 
'curv_baseline',\n ]\n }\n exp['data_augmentations'] = [\n [\n 'grayscale',\n 'center_crop',\n # 'left_right',\n # 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'zero_one'\n ]]\n exp['val_augmentations'] = [\n [\n 'grayscale',\n 'center_crop',\n # 'left_right',\n # 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'zero_one'\n ]]\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 4\n exp['model_name'] = 'unet'\n exp['exp_name'] = exp['model_name'] + '_' + exp['dataset'][0]\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 200\n exp['shuffle_val'] = True # Shuffle val data.\n exp['shuffle_train'] = True\n return exp" ]
[ "0.52572966", "0.52147967", "0.5190277", "0.5172461", "0.5160112", "0.5107622", "0.5044875", "0.502481", "0.49618483", "0.49577186", "0.49346328", "0.4871828", "0.47935718", "0.47819144", "0.47578776", "0.4739234", "0.47227845", "0.46987733", "0.46696952", "0.46637616", "0.46637616", "0.46511227", "0.46276534", "0.4614478", "0.46027955", "0.4583889", "0.45790952", "0.45701605", "0.4570038", "0.45660526", "0.45578164", "0.45527872", "0.45489547", "0.4540152", "0.45308465", "0.45297864", "0.4523766", "0.45116088", "0.450615", "0.45047992", "0.45030853", "0.4484141", "0.44661903", "0.4465182", "0.44639936", "0.4461899", "0.4460707", "0.44505703", "0.44504222", "0.44504222", "0.44403142", "0.44392675", "0.4439187", "0.4437419", "0.44348723", "0.44322726", "0.44310188", "0.44258872", "0.44258872", "0.44215468", "0.44212034", "0.4414977", "0.4412194", "0.4409099", "0.44086286", "0.44073302", "0.44001204", "0.43944925", "0.4392665", "0.43915915", "0.43817896", "0.43786982", "0.43727747", "0.43703642", "0.43700716", "0.43630952", "0.43595964", "0.43578085", "0.43551746", "0.4355111", "0.43514544", "0.4350542", "0.43493685", "0.43442515", "0.4342659", "0.43345293", "0.43269992", "0.4326529", "0.43146607", "0.43135643", "0.43094578", "0.43089017", "0.4308462", "0.43052644", "0.43040568", "0.430375", "0.4302613", "0.43002284", "0.4299752", "0.42984408" ]
0.56161755
0
Initializes the tensorflow graph for the ResNet50v2 model.
def __init__(self, x, num_classes=15, is_training=False):
    super(resnet_v2_50, self).__init__()
    self.x = x
    self.num_classes = num_classes

    # populating the tensorflow graph
    with slim.arg_scope(arg_scopes_map['resnet_v2_50']()):
        net, end_points = networks_map['resnet_v2_50'](
            x, num_classes=num_classes,
            is_training=is_training, reuse=None)

    self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_50/logits')
    self.variables_to_restore = slim.get_variables_to_restore(exclude=[])
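A minimal usage sketch for the resnet_v2_50 wrapper above, assuming the TF-Slim networks_map / arg_scopes_map registries and the _get_updated_endpoints helper are importable from the same module; the input shape, checkpoint path, and dummy feed values are illustrative assumptions, not taken from the dataset record.

import numpy as np
import tensorflow as tf

# Illustrative sketch: build the resnet_v2_50 graph wrapper defined above and
# restore pretrained weights. The checkpoint path below is a hypothetical placeholder.
images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="images")
model = resnet_v2_50(images, num_classes=15, is_training=False)

saver = tf.train.Saver(var_list=model.variables_to_restore)
with tf.Session() as sess:
    saver.restore(sess, "checkpoints/resnet_v2_50.ckpt")  # assumed checkpoint path
    # end_points is a dict of tensors; run it on a dummy batch to inspect output shapes
    outputs = sess.run(model.end_points,
                       feed_dict={images: np.zeros((1, 224, 224, 3), np.float32)})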
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def build_graph(self):\n assert self.n_features is not None, 'Number of features is unknown. It can be set explicitly by .core.set_num_features'\n self.graph = tf.Graph()\n self.graph.seed = self.seed\n with self.graph.as_default():\n with tf.name_scope('learnable_params') as scope:\n self.init_learnable_params()\n with tf.name_scope('input_block') as scope:\n self.init_placeholders()\n with tf.name_scope(\"cosine_similarity\"):\n self.init_similarity_computation()\n with tf.name_scope('main_block') as scope:\n self.init_main_block()\n with tf.name_scope('optimization_criterion') as scope:\n self.init_regularization()\n self.init_loss()\n self.init_target()\n self.trainer = self.optimizer.minimize(self.target)\n self.init_all_vars = tf.global_variables_initializer()\n self.summary_op = tf.summary.merge_all()\n self.saver = tf.train.Saver()", "def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()", "def initialize(self):\n logging.info(\"Loading model.\")\n\n self._bleurt_graph = tf.Graph()\n with self._bleurt_graph.as_default():\n\n imported = tf.saved_model.load(self.checkpoint)\n bleurt_model_ops = imported.signatures[\"serving_default\"]\n self._bleurt_ops = bleurt_model_ops(\n input_ids=tf.compat.v1.placeholder(tf.int64, name=\"input_ids\"),\n input_mask=tf.compat.v1.placeholder(tf.int64, name=\"input_mask\"),\n segment_ids=tf.compat.v1.placeholder(tf.int64, name=\"segment_ids\"))\n\n init_op = tf.group(tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.tables_initializer())\n\n self.session = tf.compat.v1.Session(graph=self._bleurt_graph)\n self.session.run(init_op)\n\n logging.info(\"Done.\")", "def create_graph(self):\n self.graph = tf.Graph()\n model_type = self.options['model_type']\n optimiser_selected = self.options['optimizer']\n\n with self.graph.as_default():\n self.tf_dataset = tf.placeholder(tf.float32,\n shape=(None, self.options['num_steps'], self.input_dimensions))\n self.tf_labels = tf.placeholder(tf.float32, shape=(None, self.input_dimensions))\n self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')\n\n # Forward pass\n if model_type == 'rnn':\n self.predict = self.rnn_model(self.tf_dataset)\n elif model_type == 'lstm':\n self.predict = self.lstm_model(self.tf_dataset)\n else:\n raise NotImplementedError(\"Unimplemented RNN model keyword\")\n\n self.loss = tf.reduce_mean(tf.square(self.predict - self.tf_labels))\n\n if self.options['regularisation_coeff'] > 0.:\n # Add in L2 penalty for regularisation if required\n penalty = self.options['regularisation_coeff'] * sum(tf.nn.l2_loss(var)\n for var in tf.trainable_variables())\n self.loss += penalty\n\n if self.options['use_customised_optimizer'] is False:\n if optimiser_selected == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif optimiser_selected == 'grad':\n self.optimizer = 
tf.train.GradientDescentOptimizer(self.learning_rate)\n elif optimiser_selected == 'ada':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif optimiser_selected == 'rms':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n else:\n raise NotImplementedError(\"Unimplemented built-in optimiser keyword.\")\n else:\n self.optimizer = self.options['customized_optimizer']\n self.minimise = self.optimizer.minimize(self.loss)", "def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def _initialize_session(self):\n config = tf.ConfigProto()\n # restrict model GPU memory utilization to min required\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n tf_ver = int(tf.__version__.split('.')[1])\n if TF_VERSION <= 0.10:\n self.sess.run(tf.initialize_all_variables())\n logswriter = tf.train.SummaryWriter\n else:\n self.sess.run(tf.global_variables_initializer())\n logswriter = tf.summary.FileWriter\n self.saver = tf.train.Saver()\n self.summary_writer = logswriter(self.logs_path, graph=self.sess.graph) # change by ccx, add the graph_def", "def _build_graph(self):\n\n self.graph = tf.Graph()\n\n # set self.graph as default graph\n with 
self.graph.as_default():\n # # clear old variables\n # tf.reset_default_graph()\n\n # set random seed\n if self.random_seed is not None:\n tf.set_random_seed(self.random_seed)\n\n self._create_placeholders()\n self._create_variables()\n\n self._create_prediction()\n\n self._create_loss()\n self._create_optimizer()\n\n self._init = tf.global_variables_initializer()\n\n self.saver = tf.train.Saver()\n\n # create session\n self.sess = tf.Session(graph=self.graph)", "def init_target_net(self, sess):\n sess.run(self.init_target_net_op)", "def setup(self, context: ExecutionContext) -> BaseStep:\n if self.is_initialized:\n return self\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)\n\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')\n\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True", "def _init_session(self):\n self.sess = tf.Session(graph=self.g)\n self.sess.run(self.init)", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W1 = tf.get_variable(\"W1\", shape=[self.h, self.N], initializer=tf.truncated_normal_initializer)\n self.b1 = tf.get_variable(\"b1\", shape=[self.h, 1], initializer=tf.zeros_initializer)\n\n self.W2 = tf.get_variable(\"W2\", shape=[self.C, self.h], initializer=tf.truncated_normal_initializer)\n self.b2 = tf.get_variable(\"b2\", shape=[self.C, 1], initializer=tf.truncated_normal_initializer)\n\n self.z1 = tf.matmul(self.W1, self.X) + self.b1\n self.a1 = self.activation(self.z1)\n\n self.z2 = tf.matmul(self.W2, self.a1) + self.b2\n self.y_hat = tf.nn.softmax(self.z2, dim=0)\n\n self.l2_reg = tf.nn.l2_loss(self.W1) + tf.nn.l2_loss(self.W2)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z2, dim=0)) \\\n + self.beta * self.l2_reg\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def _initialize_eval_graph(self):\n self.X_test_tf = tf.placeholder(tf.int64, shape=[1, 3])\n\n self.table_entity_lookup_left = None\n self.table_entity_lookup_right = None\n self.table_reln_lookup = None\n\n all_entities_np = np.int64(np.arange(len(self.ent_to_idx)))\n\n if self.is_filtered:\n all_reln_np = np.int64(np.arange(len(self.rel_to_idx)))\n self.table_entity_lookup_left = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(all_entities_np,\n np.array(self.entity_primes_left, dtype=np.int64))\n , 0)\n self.table_entity_lookup_right = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(all_entities_np,\n np.array(self.entity_primes_right, dtype=np.int64))\n , 0)\n self.table_reln_lookup = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(all_reln_np,\n np.array(self.relation_primes, dtype=np.int64))\n , 0)\n\n # Create table to store train+test+valid triplet prime 
values(product)\n self.table_filter_lookup = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(np.array(self.filter_keys, dtype=np.int64),\n np.zeros(len(self.filter_keys), dtype=np.int64))\n , 1)\n\n corruption_entities = self.eval_config.get('corruption_entities', DEFAULT_CORRUPTION_ENTITIES)\n\n if corruption_entities == 'all':\n corruption_entities = all_entities_np\n elif isinstance(corruption_entities, list):\n corruption_entities = corruption_entities\n else:\n msg = 'Invalid type for corruption entities!!!'\n logger.error(msg)\n raise ValueError(msg)\n\n self.corruption_entities_tf = tf.constant(corruption_entities, dtype=tf.int64)\n\n self.out_corr, self.out_corr_prime = generate_corruptions_for_eval(self.X_test_tf,\n self.corruption_entities_tf,\n self.eval_config.get('corrupt_side',\n DEFAULT_CORRUPT_SIDE),\n self.table_entity_lookup_left,\n self.table_entity_lookup_right,\n self.table_reln_lookup)\n\n if self.is_filtered:\n # check if corruption prime product is present in dataset prime product\n self.presense_mask = self.table_filter_lookup.lookup(self.out_corr_prime)\n self.filtered_corruptions = tf.boolean_mask(self.out_corr, self.presense_mask)\n else:\n self.filtered_corruptions = self.out_corr\n\n self.concatinated_set = tf.concat([self.X_test_tf, self.filtered_corruptions], 0)\n\n e_s, e_p, e_o = self._lookup_embeddings(self.concatinated_set)\n self.scores_predict = self._fn(e_s, e_p, e_o)\n self.score_positive = tf.gather(self.scores_predict, 0)\n self.rank = tf.reduce_sum(tf.cast(self.scores_predict >= self.score_positive, tf.int32))", "def _setup_graph(self):\n sess = tf.Session()\n\n ### PROBLEM 1\n ### YOUR CODE HERE\n # raise NotImplementedError\n state_ph, action_ph, next_state_ph, reward_ph = self._setup_placeholders()\n next_state_pred = self._dynamics_func(state_ph, action_ph)\n loss, optimizer = self._setup_training(state_ph, next_state_ph, next_state_pred)\n\n # fit cost function\n reward_pred = self._reward_func(state_ph, action_ph, next_state_pred)\n reawrd_loss, reward_optimizer = self._reward_training(reward_ph, reward_pred)\n\n ### PROBLEM 2\n ### YOUR CODE HERE\n # self._rollout_state_ph = tf.placeholder(tf.float32, (1, self._state_dim), name='rollout_state_ph')\n best_action = self._setup_action_selection(state_ph)\n\n # BONUS\n self._best_action_cross_entropy = self._cross_entropy_action_selection(state_ph)\n\n sess.run(tf.global_variables_initializer())\n\n return sess, state_ph, action_ph, next_state_ph, reward_ph, \\\n next_state_pred, loss, optimizer, best_action, reward_pred, reawrd_loss, reward_optimizer", "def add_initializer_to_graph(self):\n with tf.device(self.params.device):\n with self.graph.as_default():\n with tf.compat.v1.variable_scope(\"initialization\") as scope:\n self.init_op = tf.group(tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.local_variables_initializer())", "def _init_session(self):\n self.sess = tf.Session(config=self.config, graph=self.g)\n self.sess.run(self.init)", "def setup(self):\n if not hasattr(logger, 'LOG_DIR'):\n raise RuntimeError(\"logger directory wasn't set!\")\n\n self._setup() # subclass will setup the graph\n\n describe_model()\n # some final operations that might modify the graph\n logger.info(\"Setup summaries ...\")\n self.summary_writer = tf.summary.FileWriter(logger.LOG_DIR, graph=tf.get_default_graph())\n # create an empty StatHolder\n self.stat_holder = StatHolder(logger.LOG_DIR)\n\n logger.info(\"Setup callbacks graph ...\")\n 
self.config.callbacks.setup_graph(weakref.proxy(self))\n self.config.session_init._setup_graph()\n\n def after_init(scaffold, sess):\n logger.info(\"Graph variables initialized.\")\n self.config.session_init._run_init(sess)\n\n scaffold = tf.train.Scaffold(\n init_op=tf.global_variables_initializer(),\n init_fn=after_init)\n logger.info(\"Finalize the graph, create the session ...\")\n self.monitored_sess = tf.train.MonitoredSession(\n session_creator=tf.train.ChiefSessionCreator(\n scaffold=scaffold, config=self.config.session_config),\n hooks=self.config.callbacks.get_hooks())\n self.hooked_sess = self.monitored_sess # just create an alias\n self.sess = self.monitored_sess._tf_sess() # expose the underlying session also", "def build(self):\n self.global_step = tf.train.get_or_create_global_step()\n self.build_network()\n if self.mode != tf.estimator.ModeKeys.PREDICT:\n self.build_losses()", "def build(self):\n self.global_step = tf.train.get_or_create_global_step()\n self.build_network()\n if self.mode != tf.estimator.ModeKeys.PREDICT:\n self.build_losses()", "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! 
Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)", "def initialize_session(self):\r\n self.logger.info(\"Initializing tf session\")\r\n self.sess = tf.Session()\r\n self.sess.run(tf.global_variables_initializer())\r\n self.saver = tf.train.Saver()", "def __init__(self, resnet_size, bottleneck, num_classes, \n num_filters, kernel_size, conv_stride, time_kernel_size,\n first_pool_size, first_pool_stride,\n block_sizes, block_strides,\n final_size, data_format=None,\n model_name_scope='resnet_model'):\n self.resnet_size = resnet_size\n\n if not data_format:\n data_format = (\n 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')\n\n self.resnet_version = 2\n\n self.bottleneck = bottleneck\n if bottleneck:\n self.block_fn = _bottleneck_block_v2\n else:\n self.block_fn = 
_building_block_v2\n\n self.data_format = data_format\n self.num_classes = num_classes\n self.num_filters = num_filters\n self.kernel_size = kernel_size\n self.conv_stride = conv_stride\n self.time_kernel_size = time_kernel_size\n self.first_pool_size = first_pool_size\n self.first_pool_stride = first_pool_stride\n self.block_sizes = block_sizes\n self.block_strides = block_strides\n self.final_size = final_size\n self.dtype = tf.float32\n self.pre_activation = True\n self.model_name_scope = model_name_scope", "def __init__(self, sess, network, learning_rate=0.1, discount_factor=0.99):\n self.sess = sess\n self.learning_rate = learning_rate\n self.discount_factor = discount_factor\n self.network = network\n self.defineUpdateOperations()\n self.init = tf.global_variables_initializer()\n self.initialize_variables()", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n self._add_placeholders()\n with tf.device(\"/gpu:0\"):\n self._add_seq2seq()\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n if self._hps.mode == 'train':\n self._add_train_op()\n self._summaries = tf.summary.merge_all()\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)\n \n print('#'*78,'\\nprinting model variables:')\n total_parameters = 0\n for variable in tf.trainable_variables():\n shape = variable.get_shape().as_list()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim\n print('{:}: shape={:}, variable_parameters={:}'.format(\n variable.name, shape, variable_parameters))\n total_parameters += variable_parameters\n print('total model parameters: {:}'.format(total_parameters))\n print('#'*78)", "def _init_vars(self):\n print \"Initializing session\"\n self.x = tf.placeholder(tf.float32, shape=[None, 784])\n self.y = tf.placeholder(tf.float32, shape=[None, 10])", "def _build_graph(self, seed):\n self.g = tf.Graph()\n with self.g.as_default():\n tf.set_random_seed(seed)\n self._placeholders()\n self._policy_nn()\n self._loss_train_op()\n self.init = tf.global_variables_initializer()", "def initialize_session(self):\n self.logger.info(\"Initializing tf session\")\n session_conf = tf.ConfigProto(\n allow_soft_placement=self.FLAGS.allow_soft_placement,\n log_device_placement=self.FLAGS.log_device_placement)\n self.session = tf.Session(config=session_conf)\n self.session.run(tf.global_variables_initializer())\n try: \n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.FLAGS.num_checkpoints)\n except:\n pass", "def initialize_session(self):\n self.logger.info(\"Initializing tf session\")\n self.sess = tf.compat.v1.Session()\n self.sess.run(tf.compat.v1.global_variables_initializer())\n self.saver = tf.compat.v1.train.Saver()", "def initialize_and_train(self):\n self.probabilities = tf.nn.softmax(self.hidden_layer3,name = 'test_probabilities')\n \n \"\"\"Calulates 10 probabilities based off of our input nodes, than calculates the error using\n cross entropy function, which turns those ten probabilities into an integer value. we than take \n the mean of the cross entropy errors. 
Logits are the values to be used as input to softmax\"\"\"\n self.error = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits = self.hidden_layer3, labels = self.outputs, name = 'error'))\n \"\"\"initialize all of our variables with acutal numbers\"\"\"\n with tf.Session() as session:\n session.run(self.filters.initializer)\n session.run(self.filters2.initializer)\n session.run(self.weights.initializer)\n session.run(self.weights2.initializer)\n session.run(self.bias.initializer)\n session.run(self.bias2.initializer)\n session.run(self.weights3.initializer)\n session.run(self.bias3.initializer)\n \"\"\"create gradient descent function\"\"\"\n self.train = tf.train.GradientDescentOptimizer(0.1).minimize(self.error)\n \n \"\"\"these are our two index's that give us our batch size for gradient descent below\"\"\"\n index1 = 0\n index2 = 500\n \"\"\"this for loop runs mini-batch gradient descent and prints error every ith iteration\"\"\"\n for i in range(4500): \n \"\"\"if our second index is less than the # of training sets, input propper index in feed_dict and run\"\"\"\n if index2 < int(self.images.shape[0]): \n feed_dict = {self.inputs : self.images[index1:index2], self.outputs : self.labels[index1:index2]} \n session.run(self.train, feed_dict)\n iteration = i+1\n \"\"\"add 500 to each index and continue iterations\"\"\"\n index1 += 500\n index2 += 500\n \n elif index2 >= int(self.images.shape[0]):\n \"\"\"if our second index is greater than or equal to # of training sets, \n input propper index in feed_dict and run\"\"\"\n index2 == int(self.images.shape[0])\n feed_dict = {self.inputs : self.images[index1:index2], self.outputs : self.labels[index1:index2]}\n session.run(self.train, feed_dict)\n iteration = i+1\n \"\"\"reset the index back to its orginal value and continue iterations\"\"\"\n index1 = 0\n index2 = 500 \n\n if iteration % 100 == 0: \n print(index1,index2)\n print('#', iteration, 'error is:', session.run(self.error, feed_dict))\n \"\"\"save the final results of our weights/filter variables as outputfile\"\"\"\n self.saver = tf.train.Saver() \n self.saver.save(session, \"/Users/bennicholl/Desktop/outputfile\")\n \n \"\"\"this below code is for tensorboard, a data visualization tool\"\"\"\n \"\"\"open local host:6006 on chrome, than type in hashtagged code block below in a terminal\"\"\"\n #python -m tensorboard.main --logdir=\"/Users/bennicholl/Desktop/output3\"\n with tf.Session() as session:\n writer = tf.summary.FileWriter(\"/Users/bennicholl/Desktop/output3\", session.graph)\n writer.close()", "def define_graph(self):\n with tf.name_scope('discriminator'):\n ##\n # Setup scale networks. Each will make the predictions for images at a given scale.\n ##\n\n self.scale_nets = []\n for scale_num in xrange(self.num_scale_nets):\n with tf.name_scope('scale_net_' + str(scale_num)):\n scale_factor = 1. 
/ 2 ** ((self.num_scale_nets - 1) - scale_num)\n self.scale_nets.append(DScaleModel(scale_num,\n int(self.height * scale_factor),\n int(self.width * scale_factor),\n self.scale_conv_layer_fms[scale_num],\n self.scale_kernel_sizes[scale_num],\n self.scale_fc_layer_sizes[scale_num]))\n\n # A list of the prediction tensors for each scale network\n self.scale_preds = []\n for scale_num in xrange(self.num_scale_nets):\n self.scale_preds.append(self.scale_nets[scale_num].preds)\n\n ##\n # Data\n ##\n\n self.labels = tf.placeholder(tf.float32, shape=[None, 1], name='labels')\n\n ##\n # Training\n ##\n\n with tf.name_scope('training'):\n # global loss is the combined loss from every scale network\n self.global_loss = adv_loss(self.scale_preds, self.labels)\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n self.optimizer = tf.train.GradientDescentOptimizer(c.LRATE_D, name='optimizer')\n self.train_op = self.optimizer.minimize(self.global_loss,\n global_step=self.global_step,\n name='train_op')\n\n # add summaries to visualize in TensorBoard\n loss_summary = tf.summary.scalar('loss_D', self.global_loss)\n self.summaries = tf.summary.merge([loss_summary])", "def __init__(self, graph, weights,\n input_tensor_name=None,\n output_tensor_name=None):\n\n self.sess = tf.Session()\n new_saver = tf.train.import_meta_graph(graph)\n new_saver.restore(self.sess, weights)\n\n get_tensor = tf.get_default_graph().get_tensor_by_name\n # Get the initial place holder, else default\n if input_tensor_name:\n self.placeholder = get_tensor(input_tensor_name)\n else:\n self.placeholder = get_tensor('Placeholder:0')\n\n if output_tensor_name:\n self.softmax = get_tensor(output_tensor_name)\n else:\n self.softmax = get_tensor('Softmax:0')\n\n # Save trainables into params\n trainable_params = tf.trainable_variables()\n layers = {}\n params = {}\n\n def add_to_layer(name):\n try:\n layers[name] = get_tensor(\"{}:0\".format(name))\n except KeyError:\n try:\n layers[name] = get_tensor(\"{}/Relu:0\".format(name))\n except KeyError:\n print(\"Activation Not Found.\")\n pass\n\n for v in trainable_params:\n if 'weight' in v.name:\n name = v.name.split('/')[0]\n params[name] = v\n add_to_layer(name)\n\n # Pooling layers usually don't have a nice way of gathering.\n for n in tf.get_default_graph().as_graph_def().node:\n if 'pool' in n.name:\n v = get_tensor(\"{}:0\".format(n.name))\n name = n.name.split('/')[0]\n params[name] = v\n add_to_layer(name)\n\n # Get trainable params - 1 holds locations the other is a dummy script\n self.params = {}\n self._params = params\n self.layers = layers\n # Save empty dict into blobs\n self.blobs = {}", "def build_graph(self):\n # Print\n if self.verbose:\n print('Building Yolo Graph....')\n # Reset default graph\n tf.reset_default_graph()\n # Input placeholder\n self.x = tf.placeholder('float32', [None, 448, 448, 3])\n # conv1, pool1\n self.conv1 = self.conv_layer(1, self.x, 64, 7, 2)\n self.pool1 = self.maxpool_layer(2, self.conv1, 2, 2)\n # size reduced to 64x112x112\n # conv2, pool2\n self.conv2 = self.conv_layer(3, self.pool1, 192, 3, 1)\n self.pool2 = self.maxpool_layer(4, self.conv2, 2, 2)\n # size reduced to 192x56x56\n # conv3, conv4, conv5, conv6, pool3\n self.conv3 = self.conv_layer(5, self.pool2, 128, 1, 1)\n self.conv4 = self.conv_layer(6, self.conv3, 256, 3, 1)\n self.conv5 = self.conv_layer(7, self.conv4, 256, 1, 1)\n self.conv6 = self.conv_layer(8, self.conv5, 512, 3, 1)\n self.pool3 = self.maxpool_layer(9, self.conv6, 2, 2)\n # size reduced to 512x28x28\n 
# conv7 - conv16, pool4\n self.conv7 = self.conv_layer(10, self.pool3, 256, 1, 1)\n self.conv8 = self.conv_layer(11, self.conv7, 512, 3, 1)\n self.conv9 = self.conv_layer(12, self.conv8, 256, 1, 1)\n self.conv10 = self.conv_layer(13, self.conv9, 512, 3, 1)\n self.conv11 = self.conv_layer(14, self.conv10, 256, 1, 1)\n self.conv12 = self.conv_layer(15, self.conv11, 512, 3, 1)\n self.conv13 = self.conv_layer(16, self.conv12, 256, 1, 1)\n self.conv14 = self.conv_layer(17, self.conv13, 512, 3, 1)\n self.conv15 = self.conv_layer(18, self.conv14, 512, 1, 1)\n self.conv16 = self.conv_layer(19, self.conv15, 1024, 3, 1)\n self.pool4 = self.maxpool_layer(20, self.conv16, 2, 2)\n # size reduced to 1024x14x14\n # conv17 - conv24\n self.conv17 = self.conv_layer(21, self.pool4, 512, 1, 1)\n self.conv18 = self.conv_layer(22, self.conv17, 1024, 3, 1)\n self.conv19 = self.conv_layer(23, self.conv18, 512, 1, 1)\n self.conv20 = self.conv_layer(24, self.conv19, 1024, 3, 1)\n self.conv21 = self.conv_layer(25, self.conv20, 1024, 3, 1)\n self.conv22 = self.conv_layer(26, self.conv21, 1024, 3, 2)\n self.conv23 = self.conv_layer(27, self.conv22, 1024, 3, 1)\n self.conv24 = self.conv_layer(28, self.conv23, 1024, 3, 1)\n # size reduced to 1024x7x7\n # fc1, fc2, fc3\n self.fc1 = self.fc_layer(29, self.conv24, 512,\n flatten=True, linear=False)\n self.fc2 = self.fc_layer(\n 30, self.fc1, 4096, flatten=False, linear=False)\n self.fc3 = self.fc_layer(\n 31, self.fc2, 1470, flatten=False, linear=True)\n # Run session\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n self.saver.restore(self.sess, self.weightFile)\n # Print\n print('Graph built.')", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W = tf.get_variable(\"W\", shape=[self.C, self.N], initializer=tf.truncated_normal_initializer)\n self.b = tf.get_variable(\"b\", shape=[self.C, 1], initializer=tf.zeros_initializer)\n\n self.z = tf.matmul(self.W, self.X) + self.b\n self.y_hat = tf.nn.softmax(self.z, dim=0)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z, dim=0))\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def build_graph(self):\n\n ##### Build Graph #####\n baseModel.build_graph(self)\n\n ##### Create Optimization #####\n with tf.variable_scope(\"optimize\"):\n self.add_loss()\n self.add_accuracy()\n self.initialize_learning_rate()\n self.initialize_optimization()\n\n ##### History and Checkpoints #####\n self.hasTrained = False\n self._lastSaved = collections.defaultdict(None)\n self.history = collections.defaultdict(list)\n self.saver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestLossSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestAccSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n\n logging.basicConfig(level=logging.INFO)\n log_handler = logging.FileHandler(\"log.txt\")\n logging.getLogger().addHandler(log_handler)\n\n self.summaries = tf.summary.merge_all()", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = 
Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(self):\n super(Model, self).__init__()\n\n self.batch_size = 200\n self.hidden_size = 264\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)\n\n self.dense_1 = tf.keras.layers.Dense(self.hidden_size, activation='relu')\n self.dense_2 = tf.keras.layers.Dense(self.hidden_size, activation='relu')", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k * 2],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k * 2],\n initializer=self.initializer)", "def __init__(self):\n self.classes_to_detect = ['person']\n # Load lebel_map\n self._load_label(PATH_TO_LABELS, NUM_CLASSES, use_disp_name=True)\n\n # Load Tensorflow model into memory\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(GRAPH_PATH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with self.detection_graph.as_default():\n self.sess = tf.Session(graph=self.detection_graph, config=tf_config)\n # Definite input and output Tensors for detection_graph\n self.image_tensor = self.detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n # Each box represents a part of the image where a particular\n # object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n # Each score represent how level of confidence for each of\n # the objects. 
Score is shown on the result image, together\n # with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name(\n 'num_detections:0')\n\n logger.info('Model graph loaded.')", "def model_setup(self):\n self.input_a = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_A\")\n self.input_b = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_B\")\n\n self.fake_pool_A = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_A\")\n self.fake_pool_B = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_B\")\n\n self.global_step = slim.get_or_create_global_step()\n\n self.num_fake_inputs = 0\n\n self.learning_rate = tf.placeholder(tf.float32, shape=[], name=\"lr\")\n\n inputs = {\n 'images_a': self.input_a,\n 'images_b': self.input_b,\n 'fake_pool_a': self.fake_pool_A,\n 'fake_pool_b': self.fake_pool_B,\n }\n\n outputs = model.get_outputs(\n inputs, network=self._network_version, skip=self._skip)\n\n self.prob_real_a_is_real = outputs['prob_real_a_is_real']\n self.prob_real_b_is_real = outputs['prob_real_b_is_real']\n self.fake_images_a = outputs['fake_images_a']\n self.fake_images_b = outputs['fake_images_b']\n self.prob_fake_a_is_real = outputs['prob_fake_a_is_real']\n self.prob_fake_b_is_real = outputs['prob_fake_b_is_real']\n\n self.cycle_images_a = outputs['cycle_images_a']\n self.cycle_images_b = outputs['cycle_images_b']\n\n self.prob_fake_pool_a_is_real = outputs['prob_fake_pool_a_is_real']\n self.prob_fake_pool_b_is_real = outputs['prob_fake_pool_b_is_real']", "def build(self, mode):\n assert mode in ['train', 'eval']\n self.mode = mode\n self._setup_misc(mode)\n self._setup_images_and_labels()\n self._build_graph(self.images, self.labels, mode)\n\n self.init = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k],\n initializer=self.initializer)", "def start(self):\n self.sess = tf.Session()\n tf.global_variables_initializer().run(session=self.sess)", "def _make_graph(self):\n # this resets the whole default graph for tensorflow\n tf.reset_default_graph()\n # inputs/outputs:\n # each input example will be two np.hstacked 3x3 matrices, flattened\n # (initial state s and final state s' after selecting action a)\n self.input = tf.placeholder(tf.float32, [None, 3 * 6])\n self.layers, self.weights, self.biases = \\\n make_fully_connected_network(\n input_layer=self.input,\n architecture=self.architecture,\n activation=self.activation\n )\n self.output = self.layers[-1]\n self.observed = tf.placeholder(tf.float32, shape=[None, 1])\n # MSE loss function\n self.loss = tf.reduce_sum(tf.square(self.output - self.observed))\n if self.penalty:\n penalty_tensor = tf.add_n([self.penalty_function(x) for x in self.weights])\n self.loss = self.loss + self.penalty * penalty_tensor\n self.optimizer = (self.optimizer_algo(learning_rate=self.learning_rate, **self.optimizer_params)\n 
.minimize(self.loss))", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def init_machine_learning(self):\n\t\ttry:\n\t\t\tprint(\"debut du chargement! \")\n\n\t\t\tself.x = tf.placeholder(tf.float32, [None, 784])\n\n\t\t\tself.session = tf.Session()\n\t\t\tnew_saver = tf.train.import_meta_graph(\"./modeles/avancer/model_avancer.meta\")\n\t\t\tnew_saver.restore(self.session, tf.train.latest_checkpoint('./'))\n\t\t\tall_vars = tf.get_collection('vars')\n\n\t\t\t#self.y_conv = all_vars[3]\n\t\t\t#self.keep_prob = all_vars[4]\n\n\t\t\t#for v in all_vars:\n\t\t\t#\tv_ = self.session.run(v)\n\t\t\t#\tprint(v_)\n\n\t\t\tprint(\"chargement terminer\")\n\t\t\t\n\t\texcept:\n\t\t\t\n\t\t\tprint(\"le chargement a echouer ! \\n creation d'un nouveau modele !\")\n\t\t\tself.mnist = input_data.read_data_sets(self.option[\"ch_mnist\"], one_hot=True)\n\n\t\t\tself.session = tf.InteractiveSession()\n\t\t\t#creation des variables\n\t\t\tW_conv1 = self.weight_variable([5, 5, 1, 32])\n\t\t\tb_conv1 = self.bias_variable([32])\n\n\t\t\t# Placeholder\n\t\t\tself.x = tf.placeholder(tf.float32, [None, 784])\n\t\t\ty_ = tf.placeholder(tf.float32, [None, 10])\n\n\t\t\t# Reshape\n\t\t\tx_image = tf.reshape(self.x , [-1,28,28,1])\n\n\t\t\th_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1)\n\t\t\th_pool1 = self.max_pool_2x2(h_conv1)\n\n\t\t\tW_conv2 = self.weight_variable([5, 5, 32, 64])\n\t\t\tb_conv2 = self.bias_variable([64])\n\n\t\t\th_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2) + b_conv2)\n\t\t\th_pool2 = self.max_pool_2x2(h_conv2)\n\n\t\t\tW_fc1 = self.weight_variable([7 * 7 * 64, 1024])\n\t\t\tb_fc1 = self.bias_variable([1024])\n\n\t\t\th_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n\t\t\th_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\t\t\t\n\t\t\tself.keep_prob = tf.placeholder(tf.float32)\n\t\t\th_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)\n\n\t\t\tW_fc2 = self.weight_variable([1024, 10])\n\t\t\tb_fc2 = self.bias_variable([10])\n\n\t\t\tself.y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n\n\t\t\t\t\t\n\t\t\tcross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.y_conv, y_))\n\t\t\ttrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\t\t\tcorrect_prediction = tf.equal(tf.argmax(self.y_conv,1), tf.argmax(y_ ,1))\n\t\t\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\t\t\tself.session.run(tf.global_variables_initializer())\n\n\t\t\tprint(\"sauvegarde variable\")\n\t\t\ttf.add_to_collection(\"vars\", h_fc1_drop)\n\t\t\ttf.add_to_collection(\"vars\", W_fc2)\n\t\t\ttf.add_to_collection(\"vars\", b_fc2)\n\t\t\ttf.add_to_collection(\"vars\", self.y_conv)\n\t\t\ttf.add_to_collection(\"vars\", self.keep_prob)\n\n\t\t\tprint(\"lancement antrainement modele\")\n\t\t\t\n\t\t\tfor i in range(1000):\n\t\t\t\tbatch = self.mnist.train.next_batch(50)\n\t\t\t\tif i%100 == 0:\n\t\t\t\t\ttrain_accuracy = accuracy.eval(feed_dict={self.x : batch[0], y_ : batch[1], self.keep_prob : 1.0})\n\t\t\t\t\tprint(\"step %d, training accuracy %g\"%(i, train_accuracy))\n\t\t\t\ttrain_step.run(feed_dict={self.x : batch[0], y_ : batch[1], self.keep_prob: 0.5})\n\n\t\t\tbatchSize = 5000\n\t\t\tfor i in range(len(self.mnist.train.labels) // batchSize):\n\t\t\t\tbat = self.mnist.test.next_batch(100)\n\t\t\t\tprint(\"test accuracy %g\" % accuracy.eval(feed_dict={self.x : bat[0], y_: bat[1], self.keep_prob: 1.0}))\n\t\t\t\n\t\t\t#sauvegarde des données\n\t\t\tsaver = tf.train.Saver()\n\t\t\tsave_path = 
saver.save(self.session, \"./modeles/avancer/model_avancer\")\n\t\t\tprint(\"Model saved in file: %s\" % save_path)", "def init(self,sess):\n if not os.path.isfile(\\\n \"./Models/\" + self.mod_name + \".ckpt.meta\"):\n sess.run(tf.global_variables_initializer())\n return 0\n else:\n if self.gen_only:\n sess.run(tf.global_variables_initializer())\n self.load(sess)\n return 1", "def __init__(self, config):\n self.config = config\n etat.UsesTFSession.__init__(self)\n\n # Get path to model\n self.config.download_model_if_necessary()\n model_path = self.config.model_path\n\n # Load model\n self._prefix = \"main\"\n self._graph = etat.load_graph(model_path, prefix=self._prefix)\n self._sess = None\n\n # Load class labels\n labels_map = etal.load_labels_map(self.config.labels_path)\n self._class_labels = etal.get_class_labels(labels_map)\n self._num_classes = len(self._class_labels)\n\n # Get network\n network_name = self.config.network_name\n network_fn = nf.get_network_fn(\n network_name, num_classes=self._num_classes, is_training=False\n )\n self.img_size = network_fn.default_image_size\n\n # Get input operation\n self._input_op = self._graph.get_operation_by_name(\n self._prefix + \"/\" + self.config.input_name\n )\n\n # Get feature operation, if necessary\n features_name = None\n if self.config.generate_features:\n if self.config.features_name:\n features_name = self.config.features_name\n elif network_name in _DEFAULT_FEATURES_NAMES:\n features_name = _DEFAULT_FEATURES_NAMES[network_name]\n if features_name is not None:\n self._features_op = self._graph.get_operation_by_name(\n self._prefix + \"/\" + features_name\n )\n else:\n self._features_op = None\n\n # Get output operation\n if self.config.output_name:\n output_name = self.config.output_name\n else:\n output_name = _DEFAULT_OUTPUT_NAMES.get(network_name, None)\n if output_name is None:\n raise ValueError(\n \"`output_name` was not provided and network `%s` was not \"\n \"found in default outputs map\" % network_name\n )\n self._output_op = self._graph.get_operation_by_name(\n self._prefix + \"/\" + output_name\n )\n\n # Setup preprocessing\n self._transforms = self._make_preprocessing_fcn(\n network_name, self.config.preprocessing_fcn\n )\n self._preprocess = True\n\n self._last_features = None\n self._last_probs = None", "def __init__(self, model_path, img_width, img_height, gpu_fraction=1.0):\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=['input_1:0', 'cumsum_values_1:0'])\n\n self.img_width = img_width\n self.img_height = img_height", "def build_graph(self):\n\t\tself.n_hidden = 100\n\t\tself.weights_hidden = tf.get_variable(\"weights_hidden\", [self.state_size, self.n_hidden], initializer = tf.random_normal_initializer())\n\t\tself.bias_hidden = tf.get_variable(\"bias_hidden\", [self.n_hidden], initializer = tf.constant_initializer(0.1))\n\n\t\tself.weights_out = tf.get_variable(\"weights_out\", [self.n_hidden, self.action_size], initializer = tf.random_normal_initializer())\n\t\tself.bias_out = tf.get_variable(\"bias_out\", 
[self.action_size], initializer = tf.constant_initializer(0.1))", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def initialize_variables(self):\n self.sess.run(self.init)", "def __init__(self, num_steps, model_load_path, num_test_rec):\n\n self.global_step = 0\n self.num_steps = num_steps\n self.num_test_rec = num_test_rec\n\n self.sess = tf.Session()\n self.summary_writer = tf.train.SummaryWriter(c.SUMMARY_SAVE_DIR, graph=self.sess.graph)\n\n if c.ADVERSARIAL:\n print 'Init discriminator...'\n self.d_model = DiscriminatorModel(self.sess,\n self.summary_writer,\n c.TRAIN_HEIGHT,\n c.TRAIN_WIDTH,\n c.SCALE_CONV_FMS_D,\n c.SCALE_KERNEL_SIZES_D,\n c.SCALE_FC_LAYER_SIZES_D)\n\n print 'Init generator...'\n self.g_model = GeneratorModel(self.sess,\n self.summary_writer,\n c.TRAIN_HEIGHT,\n c.TRAIN_WIDTH,\n c.FULL_HEIGHT,\n c.FULL_WIDTH,\n c.SCALE_FMS_G,\n c.SCALE_KERNEL_SIZES_G)\n\n print 'Init variables...'\n self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)\n self.sess.run(tf.global_variables_initializer())\n\n # if load path specified, load a saved model\n if model_load_path is not None:\n self.saver.restore(self.sess, model_load_path)\n print 'Model restored from ' + model_load_path", "def __init__(self):\n self.sess = tf.Session()\n vocab_path = os.path.join(params.data_dir, \"vocab%d\" % params.vocab_size)\n self.vocab, self.rev_vocab = data_utils.initialize_vocabulary(vocab_path)\n self.model = model_utils.create_model(self.sess, True)\n self.model.batch_size = 1 # Respond 1 sentence at a time.", "def init_resnet(num_classes: int) -> nn.Module:\n model = models.resnet50(pretrained=True)\n num_features = model.fc.in_features\n model.fc = nn.Linear(num_features, num_classes)\n\n return model", "def setup(self, params, training=True, **kwargs):\n\n tf.reset_default_graph()\n\n return super().setup(params=params, training=training, **kwargs)", "def _init_model(self, forrest):\n rels = self.get_rels(forrest)\n self._model = RDPModel(rels)", "def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.update_lr = FLAGS.update_lr\n self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())\n self.auto_lr = tf.placeholder_with_default(FLAGS.auto_lr, ())\n \n self.classification = False\n self.test_num_updates = test_num_updates\n self.dim_auto = 2 #This should be able to be arbitrary\n if auto:\n self.real_input = 39 # This is square root of the total (its a kernel)\n #self.real_output = 40#self.dim_output\n self.real_output = 39*39 # This should be the complete dimension out. \n self.dim_input = 3*self.dim_auto #= 3*self.dim_auto \n self.dim_output = self.dim_auto\n #This is from each. 
\n #if auto: self.dim_input, self.dim_output = self.dim_auto, self.dim_auto #If auto, pass in/out the dimension of the latent (auto_\n if FLAGS.datasource == 'sinusoid':\n self.dim_hidden = [40, 40,40]\n self.loss_func = mse\n self.forward = self.forward_fc\n self.construct_weights = self.construct_fc_weights\n elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':\n self.loss_func = xent\n self.classification = True\n if FLAGS.conv:\n self.dim_hidden = FLAGS.num_filters\n self.forward = self.forward_conv\n self.construct_weights = self.construct_conv_weights\n else:\n self.dim_hidden = [256, 128, 64, 64]\n self.forward=self.forward_fc\n self.construct_weights = self.construct_fc_weights\n if FLAGS.datasource == 'miniimagenet':\n self.channels = 3\n else:\n self.channels = 1\n self.img_size = int(np.sqrt(self.dim_input/self.channels))\n else:\n raise ValueError('Unrecognized data source.')", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def __init__(self, params=None):\n if isinstance(params, SSDParams):\n self.params = params\n else:\n self.params = SSDNet.default_params\n # if cfgs.DATA_FORMAT == \"NHWC\":\n # self.images_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, None, None, 3],\n # name=\"input_images\")\n # else:\n # self.images_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 3, None, None],\n # name=\"input_images\")\n\n # self.labels_batch = tf.placeholder(dtype=tf.int32, shape=[None, None, cfgs.NUM_CLASS+1], name=\"gt_labels\")\n # self.bboxes_batch = tf.placeholder(dtype=tf.float32, shape=(None, None, None, 4), name=\"gt_bboxes\")\n # self.scores_batch = tf.placeholder(dtype=tf.float32, shape=(None, None, 1), name=\"gt_scores\")\n\n self.global_step = tf.train.get_or_create_global_step()", "def __init__(self, saved_model_path, pipeline_config_path):\r\n\r\n saved_model_path = os.path.realpath(saved_model_path)\r\n assert os.path.exists(saved_model_path)\r\n\r\n # Use tf2onnx to convert saved model to an initial ONNX graph.\r\n graph_def, inputs, outputs = tf_loader.from_saved_model(saved_model_path, None, None, \"serve\",\r\n [\"serving_default\"])\r\n log.info(\"Loaded saved model from {}\".format(saved_model_path))\r\n with tf.Graph().as_default() as tf_graph:\r\n tf.import_graph_def(graph_def, name=\"\")\r\n with tf_loader.tf_session(graph=tf_graph):\r\n onnx_graph = tfonnx.process_tf_graph(tf_graph, input_names=inputs, output_names=outputs, opset=11)\r\n onnx_model = optimizer.optimize_graph(onnx_graph).make_model(\"Converted from {}\".format(saved_model_path))\r\n self.graph = gs.import_onnx(onnx_model)\r\n assert self.graph\r\n log.info(\"TF2ONNX graph created successfully\")\r\n\r\n # Fold constants via ONNX-GS that TF2ONNX may have missed.\r\n self.graph.fold_constants()\r\n \r\n # Pipeline config parsing.\r\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\r\n with tf.io.gfile.GFile(pipeline_config_path, 'r') as f:\r\n text_format.Merge(f.read(), pipeline_config)\r\n\r\n # If your model is SSD, get characteristics accordingly from pipeline.config file.\r\n if pipeline_config.model.HasField(\"ssd\"):\r\n # Getting model characteristics.\r\n self.model = str(pipeline_config.model.ssd.feature_extractor.type)\r\n self.height = int(pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height)\r\n self.width = 
int(pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width)\r\n self.first_stage_nms_score_threshold = float(pipeline_config.model.ssd.post_processing.batch_non_max_suppression.score_threshold)\r\n self.first_stage_nms_iou_threshold = float(pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold)\r\n self.first_stage_max_proposals = int(pipeline_config.model.ssd.post_processing.batch_non_max_suppression.max_detections_per_class)\r\n\r\n # If your model is Faster R-CNN get it's characteristics from pipeline.config file.\r\n elif pipeline_config.model.HasField(\"faster_rcnn\"): \r\n self.model = str(pipeline_config.model.faster_rcnn.feature_extractor.type) \r\n # There are two types of image_resizers, select accordingly from pipeline.config file.\r\n if pipeline_config.model.faster_rcnn.image_resizer.HasField(\"fixed_shape_resizer\"):\r\n self.height = int(pipeline_config.model.faster_rcnn.image_resizer.fixed_shape_resizer.height)\r\n self.width = int(pipeline_config.model.faster_rcnn.image_resizer.fixed_shape_resizer.width)\r\n elif pipeline_config.model.faster_rcnn.image_resizer.HasField(\"keep_aspect_ratio_resizer\"): \r\n self.height = int(pipeline_config.model.faster_rcnn.image_resizer.keep_aspect_ratio_resizer.max_dimension)\r\n self.width = self.height\r\n else:\r\n log.info(\"Image resizer config is not supported\")\r\n sys.exit(1)\r\n\r\n # Getting model characteristics\r\n self.first_stage_nms_score_threshold = float(pipeline_config.model.faster_rcnn.first_stage_nms_score_threshold) \r\n self.first_stage_nms_iou_threshold = float(pipeline_config.model.faster_rcnn.first_stage_nms_iou_threshold)\r\n self.first_stage_max_proposals = int(pipeline_config.model.faster_rcnn.first_stage_max_proposals)\r\n self.initial_crop_size = int(pipeline_config.model.faster_rcnn.initial_crop_size)\r\n self.second_score_threshold = float(pipeline_config.model.faster_rcnn.second_stage_post_processing.batch_non_max_suppression.score_threshold)\r\n self.second_iou_threshold = float(pipeline_config.model.faster_rcnn.second_stage_post_processing.batch_non_max_suppression.iou_threshold)\r\n\r\n else: \r\n log.info(\"Given pipeline.config file is not supported\")\r\n sys.exit(1)\r\n\r\n #print(self.model)\r\n #print(self.height)\r\n #print(self.width)\r\n #print(self.first_stage_nms_score_threshold)\r\n #print(self.first_stage_nms_iou_threshold)\r\n #print(self.first_stage_max_proposals)\r\n #print(self.initial_crop_size)\r\n #print(self.second_score_threshold)\r\n #print(self.second_iou_threshold)\r\n #print(self.first_stage_max_proposals)\r\n\r\n self.batch_size = None", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n\n self._add_placeholders()\n\n with tf.device(\"/gpu:%d\"%(config.gpu_selection)):\n self._add_seq2seq()\n\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n\n\n if self._mode == 'train':\n self._add_train_op()\n\n self._summaries = tf.summary.merge_all()\n\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)", "def create(self):\n # 1st Layer: Conv -> norm -> ReLu\n conv1 = self.conv(x=self.X, stride_y=1, stride_x=1, padding='SAME', name='conv1')\n norm1 = lrn(conv1, 2, 1e-04, 0.75, name='norm1')\n # Apply relu function\n relu1 = tf.nn.relu(norm1)\n\n # 2st Layer: Conv -> norm -> ReLu\n conv2 = self.conv(x=relu1, stride_y=1, stride_x=1, padding='SAME', name='conv2')\n norm2 = lrn(conv2, 2, 1e-04, 0.75, name='norm2')\n # Apply relu function\n relu2 = 
tf.nn.relu(norm2)\n\n pool2 = tf.nn.max_pool(relu2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # 3st Layer: Conv -> norm -> ReLu\n conv3 = self.conv(x=pool2, stride_y=1, stride_x=1, padding='SAME', name='conv3')\n norm3 = lrn(conv3, 2, 1e-04, 0.75, name='norm3')\n # Apply relu function\n relu3 = tf.nn.relu(norm3)\n\n # 4st Layer: Conv -> norm -> ReLu\n conv4 = self.conv(x=relu3, stride_y=1, stride_x=1, padding='SAME', name='conv4')\n norm4 = lrn(conv4, 2, 1e-04, 0.75, name='norm4')\n # Apply relu function\n relu4 = tf.nn.relu(norm4)\n\n pool4 = tf.nn.max_pool(relu4, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # 5st Layer: Conv -> norm -> ReLu\n conv5 = self.conv(x=pool4, stride_y=1, stride_x=1, padding='SAME', name='conv5')\n norm5 = lrn(conv5, 2, 1e-04, 0.75, name='norm5')\n # Apply relu function\n relu5 = tf.nn.relu(norm5)\n\n # 6st Layer: Conv -> norm -> ReLu\n conv6 = self.conv(x=relu5, stride_y=1, stride_x=1, padding='SAME', name='conv6')\n norm6 = lrn(conv6, 2, 1e-04, 0.75, name='norm6')\n # Apply relu function\n relu6 = tf.nn.relu(norm6)\n\n pool6 = tf.nn.avg_pool(relu6, ksize=[1, 4, 4, 1],\n strides=[1, 4, 4, 1],\n padding='SAME')\n\n flattened = tf.reshape(pool6, [-1, 128 * 4])\n self.fc7 = self.fc(flattened, name='fc7')", "def _setup_init(self):\n with tf.variable_scope(\"output\", reuse=True):\n assert self.q_values is not None\n self.policy_proba = tf.nn.softmax(self.q_values)", "def build_inference_graph(self):\n self.build_train_graph()", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def __init__(self, **kwargs):\n super().__init__()\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)", "def build_graph(self, save_model_path):\n if os.path.exists(\"{}.meta\".format(save_model_path)):\n logger.info(\"Graph existed, ready to be reloaded...\")\n else:\n logger.info(\"No graph can be loaded, so create a new graph...\")\n tf.reset_default_graph()\n # placeholders\n x = self.neural_net_image_input((32, 32, 3))\n y = self.neural_net_label_input(10)\n keep_prob = self.neural_net_keep_prob_input()\n\n # model\n logits_out = self.conv_net(x, keep_prob)\n\n # Name logits_out\n logits_out = tf.identity(logits_out, name='logits')\n\n # loss and optimizer\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_out, labels=y), name='cost')\n optimzer = tf.train.AdamOptimizer(name='optimizer').minimize(loss)\n\n # Accuracy\n correct_pred = tf.equal(tf.argmax(y, axis=1), tf.argmax(logits_out, axis=1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\n # print(type(tf.Variable(1)))\n saver = tf.train.Saver()\n if not os.path.exists('./savedModel'):\n os.mkdir('./savedModel')\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.save(sess, './savedModel/cnn-model')", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 
64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def model_initializer():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten())\n # model.add(tf.keras.layers.Dense(128, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(64, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(32, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n\n model.compile(optimizer='rmsprop',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model", "def reset(self):\n self.close()\n self._sess = tf.Session(graph=self._graph)\n self._sess.run(self._initializer)", "def _init_tf(self):\n assert not self.tf_init_done, \\\n \"this class is not designed to be initialised twice\"\n self.sess = tf.get_default_session()\n self.optimiser = tf.train.AdamOptimizer(learning_rate=self.lr)\n\n # maps problem names to (obs var, q-value var) tuples\n self.obs_qv_inputs = {}\n losses = []\n loss_parts = None\n batch_sizes = []\n for problem in self.problems:\n this_obs_var, this_q_values, this_loss, this_loss_parts \\\n = self._instantiate_net(problem)\n self.obs_qv_inputs[problem.name] = (this_obs_var, this_q_values)\n this_batch_size = tf.shape(this_obs_var)[0]\n losses.append(this_loss)\n batch_sizes.append(tf.cast(this_batch_size, tf.float32))\n if loss_parts is None:\n loss_parts = this_loss_parts\n else:\n # we care about these parts because we want to display them to\n # the user (e.g. 
how much of my loss is L2 regularisation\n # loss?)\n assert len(loss_parts) == len(this_loss_parts), \\\n 'diff. loss breakdown for diff. probs. (%s vs %s)' \\\n % (loss_parts, this_loss_parts)\n # sum up all the parts\n new_loss_parts = []\n for old_part, new_part in zip(loss_parts, this_loss_parts):\n assert old_part[0] == new_part[0], \\\n \"names (%s vs. %s) don't match\" % (old_part[0],\n new_part[0])\n to_add = new_part[1] * tf.cast(this_batch_size, tf.float32)\n new_loss_parts.append((old_part[0], old_part[1] + to_add))\n loss_parts = new_loss_parts\n self.op_loss \\\n = sum(l * s for l, s in zip(losses, batch_sizes)) \\\n / sum(batch_sizes)\n # this is actually a list of (name, symbolic representation) pairs for\n # components of the loss\n self.loss_part_ops = [(name, value / sum(batch_sizes))\n for name, value in loss_parts]\n\n # Next bit hairy because we want combined grads (and also want to split\n # them out for TensorBoard to look at). Really this is similar to\n # self.op_train = self.optimiser.minimize(loss).\n params = self.weight_manager.all_weights\n # do a check that set(params) is the same as\n param_set = set(params)\n for problem in self.problems:\n their_param_set = set(problem.policy.get_params(trainable=True))\n assert their_param_set == param_set, \\\n \"policy for %s has weird params\" % problem.name\n\n grads_and_vars = self.optimiser.compute_gradients(\n self.op_loss, var_list=params)\n # see https://stackoverflow.com/a/43486487 for gradient clipping\n gradients, variables = zip(*grads_and_vars)\n gradients = list(gradients)\n # for grad, var in grads_and_vars:\n # gradients[0] = tf.Print(gradients[0], [tf.norm(grad), tf.norm(var)], 'grad/var norm for %s:' % var.name)\n grads_and_vars = zip(gradients, variables)\n self.op_train = self.optimiser.apply_gradients(\n grads_and_vars=grads_and_vars)\n for g, v in grads_and_vars:\n tf.summary.histogram(\n 'weight-grads/' + v.name, g, collections=['sl-hists'])\n for slot in self.optimiser.get_slot_names():\n slot_var = self.optimiser.get_slot(v, slot)\n if slot_var is not None:\n dest_name = 'slots-' + slot + '/' + v.name\n tf.summary.histogram(\n dest_name, slot_var, collections=['sl-hists'])\n\n # \"weights\" is probably set by some code somewhere deep in RLLab\n # TODO: this is probably not the best idea. Maybe do weight hist stuff\n # *here*?\n weight_op = tf.summary.merge_all('weights')\n # 'summaries_f_prob' (for activations) is set up in\n # CategoricalMLPPolicy.__init__. 
Again I stuck it deep in RLLab because\n # I'm an idiot.\n act_op = tf.summary.merge_all('sl-activations')\n tf.summary.merge([act_op, weight_op], collections=['sl-hists'])\n self.op_summary = tf.summary.merge_all('sl-hists')\n\n # tensorboard ops\n self._log_ops = {}\n\n self.sess.run(tf.global_variables_initializer())\n\n self.tf_init_done = True", "def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def __init__(self, sess, save_folder, file_name, **kwargs):\n\n tf.logging.info('Building graph for low dimensional score metric')\n self._build_graph(**kwargs)\n\n self.build_summaries()\n tf.logging.info('Summary operator made')\n\n self.sess = sess\n self.initialize_model(save_folder, file_name, sess)\n tf.logging.info('Model initialized')", "def build_graph():\n os.environ['CUDA_VISIBLE_DEVICES']= '0'\n\n # frozen_model = '/home/kevin/Codes/DeepNet/log/20180419_221132/frozen_model.pb'\n # frozen_model = '/home/kevin/Downloads/deeplabv3_cityscapes_train/frozen_inference_graph.pb'\n # frozen_model = '/home/kevin/Codes/EnvNet/RUNS/used3/frozen_model.pb'\n frozen_model = '/home/kevin/Codes/DeepNet/log/20180716_212035/frozen_model1.pb'\n graph = load_graph(frozen_model)\n\n for op in graph.get_operations():\n print(op.name)\n\n ## model_envnet/frozen_model.pb\n image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n ## model_deeplab/frozen_inference_graph.pb\n # image_pl = graph.get_tensor_by_name('ImageTensor:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n # ## model_deepnet/frozen_model.pb\n # image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n config = tf.ConfigProto() \n config.gpu_options.per_process_gpu_memory_fraction = 0.5\n sess = tf.Session(graph=graph,config=config)\n\n return image_pl, pred_seg, sess", "def _initialize_local_and_global_variables(self):\n variables_initialization_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n self.sess.run(variables_initialization_op)", "def __init__(self, MY_GRAPH_PATH):\n self.graph = tf.Graph()\n\n graph_def = None\n with tf.gfile.FastGFile(MY_GRAPH_PATH, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n if graph_def is None:\n raise RuntimeError('Cannot find inference graph in tar archive.')\n\n with self.graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n self.sess = tf.Session(graph=self.graph)", "def build_tf_graph(self):\n raise NotImplementedError", "def compile(self, seed=42):\n ops.reset_default_graph()\n self._log_params() # Small trick to get all the variables and log them\n # Create the graph object\n with tf.device(\"/gpu:0\"):\n logger.info(\"Building graph...\")\n tf.set_random_seed(seed)\n self.global_step = 
tf.get_variable(name=\"global_step\",\n shape=[],\n dtype='int32',\n initializer=tf.constant_initializer(0),\n trainable=False)\n self._create_placeholders()\n self._setup_graph_def()\n\n self._add_scalar_summary(self.loss)\n if self.eval_metric is not None:\n self._add_scalar_summary(self.eval_metric)\n self._is_graph_build = True", "def __init__(self, state_size, action_size, scope='global', layer_size=np.array([400, 300])):\n self.state_size = state_size\n self.action_size = action_size\n self.scope = scope\n with tf.variable_scope(scope):\n self.inputs = tf.placeholder(shape=[None, state_size], dtype=tf.float32)\n self.layers = [self.inputs]\n for i in range(len(layer_size)):\n self.layers.append(slim.fully_connected(self.layers[i], int(layer_size[i]), activation_fn=tf.nn.relu))\n\n self.policyLayer = slim.fully_connected(self.layers[-1], action_size, activation_fn=tf.nn.tanh)\n # Get the index of the highest output from the neural network\n self.maxOutputNode = tf.argmax(self.policyLayer, 1)", "def __init__(self, model='facenet-20180402-114759.pb'):\n print('Load Frozen Graph')\n\n with tf.gfile.FastGFile(os.path.join(os.path.dirname(__file__), \"weights\", model),\n 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n self.graph = tf.get_default_graph()\n\n print('Ended loading frozen graph')", "def __init__(self, model_path, gpu_fraction=1.0,\n input_name = 'input_1:0',\n output_name = 'output_node0:0',\n optimize = True,\n optimizer_args = None):\n\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n\n if optimize:\n if type(output_name) == list:\n sensitive_nodes = output_name\n else:\n sensitive_nodes = [output_name]\n graph_def = optimizeGraph(graph_def,\n sensitive_nodes,\n optimizer_args)\n if type(output_name) == list:\n return_elements = [input_name, *output_name]\n tensors = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n # The first is an input\n self.input_tensor = tensors[0]\n # The rest are outputs\n self.output_tensor = tensors[1:]\n else:\n return_elements = [input_name, output_name]\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n\n self.input_shape = self.input_tensor.get_shape().as_list()", "def fit_eval(self, sess):\n tfconfig = tf.ConfigProto()\n tfconfig.gpu_options.allow_growth = True\n self.sess = tf.Session(config=tfconfig)\n self.sess.run(tf.global_variables_initializer())\n self.new_saver=tf.train.import_meta_graph(self.meta_graph_path)\n self.new_saver.restore(sess,self.model_path)\n #graph = tf.get_default_graph()\n self.X_inputs=tf.get_collection(\"model.X_inputs\")[0]\n self.y_inputs=tf.get_collection(\"model.y_inputs\")[0]\n self.y_pred_meta=tf.get_collection(\"model.y_pred\")[0]\n self.lr=tf.get_collection(\"lr\")[0]\n self.batch_size=tf.get_collection(\"batch_size\")[0]\n self.keep_prob=tf.get_collection(\"keep_prob\")[0]\n self.attention=tf.get_collection(\"attention\")[0]\n self.correct_prediction_bilstm= tf.equal(tf.cast(tf.argmax(self.attention, 1), tf.int32), tf.reshape(self.y_inputs, 
[-1]))\n self.correct_prediction_attention = tf.equal(tf.cast(tf.argmax(self.y_pred_meta, 1), tf.int32), tf.reshape(self.y_inputs, [-1]))\n self.accuracy_attention = tf.reduce_mean(tf.cast(self.correct_prediction_attention, tf.float32))\n self.accuracy_bilstm = tf.reduce_mean(tf.cast(self.correct_prediction_bilstm, tf.float32))\n saver = tf.train.Saver(max_to_keep=3)\n saver.restore(sess, tf.train.latest_checkpoint(self.model.checkpoint_path))\n X_batch, y_batch = self.batch_gen.__next__()\n test_fetches = [self.attention, self.accuracy_attention, self.accuracy_bilstm, self.y_pred_meta]\n feed_dict = {self.X_inputs:X_batch, self.y_inputs:y_batch, self.lr:self._lr, self.batch_size:10, self.keep_prob:1.0}\n _att_pred, _att_acc, _bilstm_acc , _bilstm_pred = sess.run(test_fetches, feed_dict)\n print(_att_pred,_bilstm_pred, _att_acc, _bilstm_acc)\n return _att_pred,_bilstm_pred, _att_acc, _bilstm_acc", "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def _build_model(self):\n\n with tf.variable_scope(\"Matchnet\", reuse=tf.AUTO_REUSE):\n # For determining the runtime shape\n x_shp = tf.shape(self.x_in)\n\n # -------------------- Network archintecture --------------------\n # Build graph\n print(\"Building Graph\")\n self.logits = build_graph(self.x_in, self.is_training, self.config)\n # ---------------------------------------------------------------\n\n # Turn into weights for each sample\n weights = tf.nn.relu(tf.tanh(self.logits))\n\n # Make input data (num_img_pair x num_corr x 4)\n xx = tf.transpose(tf.reshape(\n self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))\n\n # Create the matrix to be used for the eight-point algorithm\n X = tf.transpose(tf.stack([\n xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],\n xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],\n xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])\n ], axis=1), (0, 2, 1))\n print(\"X shape = {}\".format(X.shape))\n wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X\n print(\"wX shape = {}\".format(wX.shape))\n XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)\n print(\"XwX shape = {}\".format(XwX.shape))\n\n # Recover essential matrix from self-adjoing eigen\n e, v = tf.self_adjoint_eig(XwX)\n self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))\n # Make unit norm just in case\n self.e_hat /= tf.norm(self.e_hat, axis=1, keep_dims=True)", "def build_graph(self):\n train_graph = tf.Graph()\n opts = self._options\n with train_graph.as_default():\n self.__inputs, self.__doc_inputs, self.__labels, self.__lr = self._get_inputs()\n embed, word_embeddings, combined_embed_vector_length = self._get_embedding_layer(\n self.__inputs, self.__doc_inputs)\n\n norm_w = tf.sqrt(tf.reduce_sum(tf.square(word_embeddings), 1, keep_dims=True))\n self.__normalized_word_embeddings = word_embeddings / norm_w\n\n weights = tf.Variable(\n tf.truncated_normal((self.vocab_size, combined_embed_vector_length),\n stddev=1.0 / math.sqrt(combined_embed_vector_length))\n )\n biases = tf.Variable(tf.zeros(self.vocab_size))\n\n if opts.loss == 'softmax':\n loss = tf.nn.sampled_softmax_loss(weights=weights,\n biases=biases,\n labels=self.__labels,\n inputs=embed,\n num_sampled=opts.negative_sample_size,\n num_classes=opts.vocab_size)\n tf.summary.scalar(\"Softmax loss\", loss)\n else:\n loss = tf.nn.nce_loss(weights=weights,\n biases=biases,\n labels=self.__labels,\n 
inputs=embed,\n num_sampled=opts.negative_sample_size,\n num_classes=opts.vocab_size)\n tf.summary.scalar(\"NCE loss\", loss)\n\n self.__cost = tf.reduce_mean(loss)\n\n if opts.train_method == 'Adam':\n self.__optimizer = tf.train.AdamOptimizer(self.__lr).minimize(self.__cost)\n else:\n self.__optimizer = tf.train.GradientDescentOptimizer(self.__lr).minimize(self.__cost)\n\n self.__summary = tf.summary.merge_all()\n\n self._session = tf.Session(graph=train_graph)\n self.saver = tf.train.Saver()\n return self", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def __init__(self):\n self._graph = tf.Graph()\n self._session = tf.compat.v1.Session(graph=self._graph)\n\n # This lock is for multi-threaded contexts where multiple threads\n # share the same EvalSavedModel.\n #\n # Locking is required in the case where there are multiple threads using\n # the same EvalMetricsGraph. 
Because the metrics variables are part of the\n # session, and all threads share the same session, without a lock, the\n # \"reset-update-get\" steps may not be atomic and there can be races.\n #\n # Having each thread have its own session would also work, but would\n # require a bigger refactor.\n # TODO(b/131727905): Investigate whether it's possible / better to have\n # each thread have its own session.\n self._lock = threading.Lock()\n\n # Variables that need to be populated.\n\n # The names of the metric.\n self._metric_names = []\n\n # Ops associated with reading and writing the metric variables.\n self._metric_value_ops = []\n self._metric_update_ops = []\n self._metric_variable_assign_ops = []\n\n # Nodes associated with the metric variables.\n self._metric_variable_nodes = []\n\n # Placeholders and feed input for the metric variables.\n self._metric_variable_placeholders = []\n self._perform_metrics_update_fn_feed_list = []\n self._perform_metrics_update_fn_feed_list_keys = []\n\n # OrderedDicts that map features, predictions, and labels keys to their\n # tensors.\n self._features_map = {}\n self._predictions_map = {}\n self._labels_map = {}\n\n # Ops to set/update/reset all metric variables.\n self._all_metric_variable_assign_ops = None\n self._all_metric_update_ops = None\n self._reset_variables_op = None\n\n # Callable to perform metric update.\n self._perform_metrics_update_fn = None\n\n # OrderedDict produced by graph_ref's load_(legacy_)inputs, mapping input\n # key to tensor value.\n self._input_map = None\n\n self._batch_size = (\n beam.metrics.Metrics.distribution(constants.METRICS_NAMESPACE,\n 'batch_size'))\n self._batch_size_failed = (\n beam.metrics.Metrics.distribution(constants.METRICS_NAMESPACE,\n 'batch_size_failed'))\n\n try:\n self._construct_graph()\n except (RuntimeError, TypeError, ValueError,\n tf.errors.OpError) as exception:\n general_util.reraise_augmented(exception, 'Failed to create graph.')", "def train_on_one_batch(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n with tf.device('/gpu:0'):\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess: #config=tf.ConfigProto(log_device_placement=True)\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 2), save_model_path)\n\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()", "def load_resnet(self, resnet_dir, keep_last=False):\n ckpt = tf.train.latest_checkpoint(resnet_dir)\n with tf.Session(config=self.config) as sess:\n # init model\n init = [tf.global_variables_initializer(), tf.local_variables_initializer()]\n sess.run(init)\n if keep_last:\n restore_var = [v for v in tf.global_variables() if 'global_step' not in v.name and 'mode' not in v.name]\n else:\n restore_var = [v for v in tf.global_variables() if 'global_step' not in v.name and 'mode' not in v.name\n and 'conv6' not in v.name]\n loader = tf.train.Saver(var_list=restore_var)\n # load model\n self.load(ckpt, sess, loader)", "def __init__(self, img_rows=400, img_cols=400, vgg_weights=\"imagenet\", inference_only=False, net_name='default', gpus=1, vgg_device=None):\n \n # Settings\n self.img_rows = img_rows\n self.img_cols = img_cols\n self.img_overlap = 30\n self.inference_only = inference_only\n self.net_name = net_name\n self.gpus = gpus\n self.vgg_device = vgg_device\n\n # Scaling for VGG input\n 
self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n \n #get PowerSpect_CMB\n reader = np.zeros((2507,))\n fp = open('./data/COM_PowerSpect_CMB-base-plikHM-TTTEEE-lowl-lowE-lensing-minimum-theory_R3.01.txt')\n \n for i,line in enumerate(fp):\n if i >= 1:\n reader[i-1] = line.split()[1]\n \n fp.close() \n readers = np.log(reader)\n self.cl = K.constant(readers)\n # Assertions\n assert self.img_rows >= 256, 'Height must be >256 pixels'\n assert self.img_cols >= 256, 'Width must be >256 pixels'\n\n # Set current epoch\n self.current_epoch = 0\n \n # VGG layers to extract features from (first maxpooling layers, see pp. 7 of paper)\n self.vgg_layers = [3, 6, 10]\n\n # Instantiate the vgg network\n if self.vgg_device:\n with tf.device(self.vgg_device):\n self.vgg = self.build_vgg(vgg_weights)\n else:\n self.vgg = self.build_vgg(vgg_weights)\n \n # Create UNet-like model\n if self.gpus <= 1:\n self.model, inputs_mask= self.build_pconv_unet()\n self.compile_pconv_unet(self.model, inputs_mask) \n else:\n with tf.device(\"/cpu:0\"):\n self.model, inputs_mask = self.build_pconv_unet()\n self.model = multi_gpu_model(self.model, gpus=self.gpus)\n self.compile_pconv_unet(self.model, inputs_mask)", "def __init__(self, model):\r\n self._tensorflow_session = model._tensorflow_session\r\n self._model = model", "def build_resnet101(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 4):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b3_feats = temp\n \n res4a_feats = self.basic_block(res3b3_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 23):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b22_feats = temp\n\n res5a_feats = self.basic_block(res4b22_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def _setup(self):\n\n # caffe-tensorflow/convert.py can only run with Python2. 
Since the default encoding format of Python2 is ASCII\n # but the default encoding format of Python3 is UTF-8, it will raise an error without 'encoding=\"latin1\"'\n weight_dict = np.load(self.vgg16_path, encoding=\"latin1\").item()\n\n scopes = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3',\n 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3']\n for scope in scopes:\n with tf.variable_scope(scope.split('_')[0] + '/' + scope, reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w_init_op = weights.assign(weight_dict[scope]['weights'])\n b_init_op = biases.assign(weight_dict[scope]['biases'])\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)\n\n with tf.variable_scope('fc6', reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w = weight_dict['fc6']['weights']\n b = weight_dict['fc6']['biases']\n w = np.reshape(w, (7, 7, 512, 4096))\n w = w[0:-1:2, 0:-1:2, :, 0:-1:4]\n b = b[0:-1:4]\n w_init_op = weights.assign(w)\n b_init_op = biases.assign(b)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)\n\n with tf.variable_scope('fc7', reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w = weight_dict['fc7']['weights']\n b = weight_dict['fc7']['biases']\n w = np.reshape(w, (1, 1, 4096, 4096))\n w = w[:, :, 0:-1:4, 0:-1:4]\n b = b[0:-1:4]\n w_init_op = weights.assign(w)\n b_init_op = biases.assign(b)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)", "def __init__(self, in_seq_length, out_seq_length, hidden_dim,\n n_epochs=1500, learning_rate=0.0001,\n save_file='./forecastnet.ckpt', model='dense'):\n # Initialize variables passed\n self.in_seq_length =in_seq_length\n self.out_seq_length = out_seq_length\n self.hidden_dim = hidden_dim\n self.n_epochs = n_epochs\n self.learning_rate = learning_rate\n self.save_file = save_file\n self.model = model\n\n # Reset the default graph\n tf.reset_default_graph()\n\n # Set random seed to keep consistent results\n # tf.set_random_seed(1)\n\n # Create the placeholders for the TensorFlow graph\n self.X, self.Y, self.is_training = self.create_placeholders()\n\n # Build the TensorFlow graph\n self.build_graph()\n\n # Define the tensorflow optimizer. 
Use an AdamOptimizer.\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n\n # Print the number of trainable parameters of the model\n print('Trainable variables = ', np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))\n print('')", "def build_graph(self):\n\n\n\n self.inputs.append( #uint8\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='input/lr')) \n\n self.label.append(\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='label/hr'))", "def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, 
weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def _build_graph(self, inputs):\n\n # inputs contains a list of input variables defined above\n input_from_sensor1, input_from_sensor2, label = inputs\n print \"ok\"\n print input_from_sensor1\n # In tensorflow, inputs to convolution function are assumed to be\n # NHWC. Add a single channel here.\n #image = tf.expand_dims(image, 3)\n\n #image = image * 2 - 1 # center the pixels values at zero\n # The context manager `argscope` sets the default option for all the layers under\n # this context. 
Here we use 32 channel convolution with shape 3x3\n\n sensor1 = Sequential('sensor1', input_from_sensor1) \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)() \n\n print sensor1\n\n sensor2 = Sequential('sensor2', input_from_sensor2) \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)()\n\n output = Connect('cloud', [sensor1, sensor2], \"inner_product\") \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)()\n\n tf.nn.softmax(output, name='prob') # a Bx10 with probabilities\n\n #g = tf.get_default_graph()\n #for v in g.as_graph_def().node:\n # print v.name\n\n # a vector of length B with loss of each sample\n cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=label)\n cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss\n\n correct = tf.cast(tf.nn.in_top_k(output, label, 1), tf.float32, name='correct')\n accuracy = tf.reduce_mean(correct, name='accuracy')\n\n # This will monitor training error (in a moving_average fashion):\n # 1. write the value to tensosrboard\n # 2. write the value to stat.json\n # 3. print the value after each epoch\n train_error = tf.reduce_mean(1 - correct, name='train_error')\n summary.add_moving_summary(train_error, accuracy)\n\n # Use a regex to find parameters to apply weight decay.\n # Here we apply a weight decay on all W (weight matrix) of all fc layers\n wd_cost = tf.multiply(1e-5,\n regularize_cost('fc.*/W', tf.nn.l2_loss),\n name='regularize_loss')\n\n self.cost = tf.add_n([wd_cost, cost], name='total_cost')\n\n summary.add_moving_summary(cost, wd_cost, self.cost)\n\n # monitor histogram of all weight (of conv and fc layers) in tensorboard\n summary.add_param_summary(('.*/W', ['histogram', 'rms']))" ]
[ "0.71776015", "0.6739433", "0.6669668", "0.660558", "0.65882456", "0.6516973", "0.6443085", "0.6423372", "0.64015436", "0.63999057", "0.6339511", "0.63393587", "0.6328483", "0.631329", "0.63033473", "0.62763274", "0.62757164", "0.627243", "0.6259759", "0.6259759", "0.6252365", "0.6238946", "0.6238248", "0.62260884", "0.62216014", "0.61992395", "0.6188734", "0.6182232", "0.6179242", "0.6167892", "0.6159257", "0.6145693", "0.61373615", "0.61370456", "0.6135909", "0.61055654", "0.6098847", "0.60771847", "0.6072279", "0.6068658", "0.6060275", "0.60361105", "0.60349673", "0.6022777", "0.59995234", "0.5975665", "0.59745455", "0.59699357", "0.59679097", "0.59655076", "0.5944577", "0.59443724", "0.5942477", "0.5916095", "0.5910781", "0.5903678", "0.59029704", "0.5899744", "0.5894725", "0.5892079", "0.58791643", "0.5875119", "0.5865222", "0.585884", "0.58562565", "0.5853644", "0.58522123", "0.5844606", "0.5841884", "0.5839001", "0.5832439", "0.5830157", "0.582558", "0.58121085", "0.58112323", "0.5808803", "0.5806357", "0.580387", "0.57945246", "0.5793703", "0.5792321", "0.5785907", "0.57851183", "0.57754564", "0.57655543", "0.57646304", "0.57595575", "0.57500404", "0.57434803", "0.574348", "0.57424086", "0.5735521", "0.57310385", "0.5730755", "0.57296336", "0.5727137", "0.57193434", "0.57140994", "0.57094336", "0.570941" ]
0.6228969
23
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None):
    if sess is None:
        sess = tf.get_default_session()
    assert sess is not None
    saver = tf.train.Saver(self.variables_to_restore)
    saver.restore(sess, checkpoint_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)", "def load_weights(self, path=None):\n\n if path is None:\n path = self.checkpoints_dir\n\n self.model.load_weights(tf.train.latest_checkpoint(path))\n logging.info(f'\\tWeights loaded from {path}')", "def load_weights(self, model_name: str, checkpoint: int, path: str = './models/'):\n path_to_model = path + model_name + '/checkpoint_' + str(checkpoint) + '/model_weights'\n self.model.load_weights(path_to_model)", "def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])", "def _load_checkpoint_to_net(config, network):\n if config.existed_ckpt:\n if config.existed_ckpt.endswith(\".npz\"):\n weights = np.load(config.existed_ckpt)\n else:\n weights = load_checkpoint(config.existed_ckpt)\n for param in network.trainable_params():\n weights_name = param.name\n if weights_name not in weights:\n raise ValueError(f\"Param {weights_name} is not found in ckpt file.\")\n\n if isinstance(weights[weights_name], Parameter):\n param.set_data(weights[weights_name].data)\n elif isinstance(weights[weights_name], Tensor):\n param.set_data(Tensor(weights[weights_name].asnumpy(), config.dtype))\n elif isinstance(weights[weights_name], np.ndarray):\n param.set_data(Tensor(weights[weights_name], config.dtype))\n else:\n param.set_data(weights[weights_name])\n else:\n for param in network.trainable_params():\n name = param.name\n value = param.data\n if isinstance(value, Tensor):\n if name.endswith(\".gamma\"):\n param.set_data(one_weight(value.asnumpy().shape))\n elif name.endswith(\".beta\") or name.endswith(\".bias\"):\n if param.data.dtype == \"Float32\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float16)))\n else:\n if param.data.dtype == \"Float32\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float16)))", "def load_weights(self, filename):\n checkpoint = torch.load(filename)\n if not checkpoint['input_size'] == self.state_size:\n print(f\"Error when loading weights from checkpoint {filename}: input size {checkpoint['input_size']} doesn't match state size of agent {self.state_size}\")\n return None\n if not checkpoint['output_size'] == self.action_size:\n print(f\"Error when loading weights from checkpoint {filename}: output size {checkpoint['output_size']} doesn't match action space size of agent {self.action_size}\")\n return None\n my_actor_hidden_layers = [each.out_features for each in self.actor_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['actor_hidden_layers'] == my_actor_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: actor hidden layers {checkpoint['actor_hidden_layers']} don't match agent's actor hidden layers {my_actor_hidden_layers}\")\n return None\n 
my_critic_hidden_layers = [each.out_features for each in self.critic_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['critic_hidden_layers'] == my_critic_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: critic hidden layers {checkpoint['critic_hidden_layers']} don't match agent's critic hidden layers {my_critic_hidden_layers}\")\n return None\n self.actor_local.load_state_dict(checkpoint['actor_state_dict'])\n self.critic_local.load_state_dict(checkpoint['critic_state_dict'])", "def load_weights(self, filepath):\n self.model.load_weights(filepath)", "def load_weights(self, weight_file):\r\n self.model.load_weights(weight_file)", "def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if \"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))", "def load_checkpoint(checkpoint_path):\n flat_checkpoint_dict = flatten_checkpoint(\n parse_checkpoint(checkpoint_path), keep_empty_nodes=True)\n return flat_checkpoint_dict", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def load_initial_weights(self, sess, weights_path, SKIP_LAYER):\r\n # Load the weights into memory\r\n weights_dict = np.load(weights_path, encoding='bytes').item()\r\n\r\n # list of all assignment operators\r\n # Loop over all layer names stored in the weights dict\r\n for op_name in weights_dict:\r\n\r\n # Check if layer should be trained from scratch\r\n if op_name not in SKIP_LAYER:\r\n\r\n with tf.variable_scope('model/source/' + op_name, reuse=True):\r\n\r\n # Assign weights/biases to their corresponding tf variable\r\n for data in weights_dict[op_name]:\r\n\r\n # Biases\r\n if len(data.shape) == 1:\r\n var = tf.get_variable('biases', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))\r\n\r\n # Weights\r\n else:\r\n var = tf.get_variable('weights', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load_checkpoint(checkpoint, model, 
optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_weights(self, file):\n self.model.load_weights(file)\n return", "def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")", "def load_weights(self):\n\n reader = pywrap_tensorflow.NewCheckpointReader(self._tf_model_prefix)\n var_to_shape_map = reader.get_variable_to_shape_map()\n data = dict()\n for name in var_to_shape_map:\n tensor = reader.get_tensor(name)\n data[name] = tensor\n\n print (\"Tensorflow checkpoint file [%s] loaded successfully. [%d] variables loaded.\"\n % (self._tf_model_prefix, len(data)))\n return data", "def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def load_model_weights(self, filename):\n self.model.load_weights(filename)", "def load_weights(model, fpath):\n state = torch.load(fpath)\n model.load_state_dict(state['state_dict'])", "def load_tf_weights_in_bert(model, tf_checkpoint_path):\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n print(\"Loading a TensorFlow models in PyTorch, 
requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load(self, filename):\n self.model.load_weights(filename)", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def load_ckpt(model,\n weight_path,\n **kargs):\n #model.set_state_dict(state_dict)\n\n if not osp.isfile(weight_path):\n raise IOError(f'{weight_path} is not a checkpoint file')\n #state_dicts = load(weight_path)\n\n logger = get_logger(\"paddlevideo\")\n state_dicts = paddle.load(weight_path)\n if \"VisionTransformer\" in str(model): # For TimeSformer case\n tmp = pretrain_vit_param_trans(model, state_dicts, kargs['num_patches'], kargs['seg_num'], kargs['attention_type'])\n else:\n tmp = {}\n total_len = len(model.state_dict())\n with tqdm(total=total_len, position=1, bar_format='{desc}', desc=\"Loading weights\") as desc:\n for item in tqdm(model.state_dict(), total=total_len, position=0):\n name = item\n desc.set_description('Loading %s' % name)\n if name not in state_dicts: # Convert from non-parallel model\n if str('backbone.' + name) in state_dicts:\n tmp[name] = state_dicts['backbone.' 
+ name]\n else: # Convert from parallel model\n tmp[name] = state_dicts[name]\n time.sleep(0.01)\n ret_str = \"loading {:<20d} weights completed.\".format(len(model.state_dict()))\n desc.set_description(ret_str)\n model.set_state_dict(tmp)", "def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))", "def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 'model!')", "def load_tf_weights_in_t5(model, config, tf_checkpoint_path):\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n tf_weights[name] = array\n\n for txt_name in names:\n name = txt_name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n if \"_slot_\" in name[-1]:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n pointer = model\n array = tf_weights[txt_name]\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n # elif scope_names[0] == 'scale':\n # pointer = getattr(pointer, 'weight')\n # elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':\n # pointer = getattr(pointer, 'bias')\n # elif scope_names[0] == 'squad':\n # pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if scope_names[0] not in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n if scope_names[0] != \"embedding\":\n logger.info(\"Transposing numpy weight of shape {} for {}\".format(array.shape, name))\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array.astype(np.float32))\n tf_weights.pop(txt_name, None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(\", \".join(tf_weights.keys())))\n # 
logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n return model", "def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(self, checkpoint_path, silent = False):\n ckc = CheckpointCache(checkpoint_path)\n\n if not self.built:\n dymmy_inputs = np.array([[0,1,2]])\n self([dymmy_inputs])\n \n symbolic_weights = self.trainable_weights + self.non_trainable_weights\n \n variable_keys = [self._clean_weight_name(symbolic_weight.name) for symbolic_weight in symbolic_weights]\n variable_keys = [self._convert_variable_name(key) for key in variable_keys]\n\n unloaded_keys = set(ckc.keys()) - set(variable_keys)\n if not silent:\n print('unused keys:', unloaded_keys)\n \n values = [ckc.get_values(key) for key in variable_keys]\n \n name_value_pair = []\n\n for weight, value in zip(symbolic_weights, values):\n if weight.shape != value.shape:\n raise ValueError(f'The shape of {weight.name} is {weight.shape} but shape from checkpoint is {value.shape}.')\n if weight.dtype != value.dtype:\n raise ValueError(f'The type of {weight.name} is {weight.dtype} but type from checkpoint is {value.dtype}.')\n \n name_value_pair.append((weight, value))\n \n K.batch_set_value(name_value_pair)\n \n return unloaded_keys", "def load_network(self, sess, filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += '/Models/'\n dir_path += filename\n saver = tf.train.Saver()\n saver.restore(sess, dir_path)", "def load_checkpoint(filename: str) -> CheckpointData:\n return torch.load(filename)", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def load_graph(filename):\n with tf.gfile.GFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])", "def load_weights_file(self, file_path):\n\n # Load the weights\n self._cnn_model.load_weights(file_path)", "def load_checkpoint(filename, from_gpu=True):\r\n assert os.path.exists(filename)\r\n if from_gpu:\r\n return torch.load(filename)\r\n else:\r\n return torch.load(filename, map_location=lambda storage, loc: storage)", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_weights(self, the_path):\n self.model.load_state_dict(torch.load(the_path))", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def load(self, filename):\n\n c = torch.load(filename)\n\n if type(c) is dict:\n sd = c['state_dict']\n self.net.load_state_dict(sd)\n if 'monitors' in c: # Remove the branching eventually\n self.monitors = c['monitors']\n else:\n self.monitors = {'loss_train': c['train_monitor'], 'loss_val': c['val_monitor'],\n 'accu_train': MetricHistory(), 'accu_val': MetricHistory()}\n if 'optimizer' in c: # Remove the branching eventually\n self.optimizer.load_state_dict(c['optimizer'])\n else:\n raise RuntimeError('Unsupported checkpoint. 
(Not a dict)')\n\n self.parent = filename\n self.last_checkpoint = filename\n self.start_epoch = self.monitors['loss_train'].num_epochs", "def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)", "def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_initial_weights(self, session, SKIP_LAYER=[]):\n if SKIP_LAYER:\n self.SKIP_LAYER = SKIP_LAYER\n \n layer_params = np.load(self.WEIGHTS_PATH, encoding = \"latin1\").item()\n \n # Loop over all layer names stored in the weights dict\n for op_name in layer_params:\n # Check if the layer is one of the layers that should be reinitialized\n if op_name not in self.SKIP_LAYER:\n with tf.variable_scope(op_name, reuse = True):\n # Loop over list of weights/biases and assign them to their corresponding tf variable\n print(\"load layer params:%s\" % op_name)\n for key in layer_params[op_name]:\n data = layer_params[op_name][key]\n # Biases\n if len(data.shape) == 1:\n var = tf.get_variable('biases', trainable = False)\n session.run(var.assign(data))\n # Weights\n else:\n var = tf.get_variable('weights', trainable = False)\n session.run(var.assign(data))", "def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] 
Failed to find a checkpoint, Exception!\")\n return False, 0", "def _load_weights(self):\n self.npz_weights = np.load(self._weight_file)\n self._load_byte_embedding()\n self._load_cnn_weights()\n self._load_highway()\n self._load_projection()", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def _load(checkpoint_path):\n state_dict, optimizer_state = dg.load_persistables(dirname=checkpoint_path)\n return state_dict, optimizer_state", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def load_weights_infer(checkpoint_path, model):\n try:\n # catalyst weights\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")[\"model_state_dict\"]\n except:\n # anything else\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(state_dict, strict=True)\n except:\n # for clf + seg for seg only prediction\n print(f\"Non-strict loading of weights from {checkpoint_path}\")\n model.load_state_dict(state_dict, strict=False)\n model.eval()\n return model", "def load_graph(filename):\n\twith tf.gfile.FastGFile(filename, 'rb') as f:\n\t\tgraph_def = tf.GraphDef()\n\t\tgraph_def.ParseFromString(f.read())\n\t\ttf.import_graph_def(graph_def, name='')", "def load_pretrained_net_weights(net, ckpt_path):\n print(\"Loading Model: \", ckpt_path)\n print('')\n\n net.load_weights(ckpt_path).expect_partial()", "def load_model(sess, meta_file, checkpoint_file):\n saver = tf.train.import_meta_graph(meta_file)\n saver.restore(sess, checkpoint_file)\n \n configs = tf.get_collection('configs')\n pvars = tf.get_collection('placeholders')\n \n model_settings = dict()\n for c in configs:\n name = c.name.split(':')[0]\n model_settings[name] = sess.run(c)\n \n model_vars = dict()\n for p in pvars:\n name = p.name.split(':')[0]\n model_vars[name] = p\n model_vars['probs'] = tf.get_collection('probs')[0]\n \n return model_settings, model_vars", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def load_pretrained_weights(model, weight_path):\n checkpoint = load_checkpoint(weight_path)\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n\n model_dict = model.state_dict()\n new_state_dict = OrderedDict()\n matched_layers, discarded_layers = [], []\n\n for k, v in state_dict.items():\n if k.startswith('module.'):\n k = k[7:] # discard module.\n\n if k in model_dict and model_dict[k].size() == v.size():\n new_state_dict[k] = v\n matched_layers.append(k)\n else:\n discarded_layers.append(k)\n\n 
model_dict.update(new_state_dict)\n model.load_state_dict(model_dict)\n\n if len(matched_layers) == 0:\n warnings.warn(\n 'The pretrained weights \"{}\" cannot be loaded, '\n 'please check the key names manually '\n '(** ignored and continue **)'.format(weight_path)\n )\n #else:\n #print(\n # 'Successfully loaded pretrained weights from \"{}\"'.\n # format(weight_path)\n #)\n #if len(discarded_layers) > 0:\n # print(\n # '** The following layers are discarded '\n # 'due to unmatched keys or layer size: {}'.\n # format(discarded_layers)\n # )", "def _load_local_weights(self, h5file):\n for name, layer in self._layers_to_save.items():\n self._load_layer_weights(layer, name, h5file)", "def load_weights(self, file_path):\n self.model.load_weights(file_path + '/policy_network.h5')\n print(\"\\nrestored weights of the policy network.\\n\")", "def load_checkpoint(path: str, use_cuda: bool = True) -> dict:\n assert os.path.isfile(path), \"Checkpoint %s not found\" % path\n checkpoint = torch.load(path, map_location=\"cuda\" if use_cuda else \"cpu\")\n return checkpoint", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def load(self, path, nr_of_saves, test_it=-1):\n with self.graph.as_default():\n print(\"Loading networks...\")\n checkpoint_dir = os.path.join(os.environ['APPROXIMATOR_HOME'], path, \"network-\"+str(test_it))\n self.saver = tf.train.Saver(max_to_keep=nr_of_saves+1)\n try:\n self.saver.restore(self.sess, checkpoint_dir)\n print(\"Loaded: {}\".format(checkpoint_dir))\n except Exception:\n if test_it <= 0:\n # Initialize the variables\n self.sess.run(tf.global_variables_initializer())\n print(\"Failed! 
Initializing the network variables...\")\n else:\n raise", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load(self):\n checkpoint = torch.load(self.checkpoint_path,\n map_location=self.net.device)\n self.load_state_dict(checkpoint)\n del checkpoint", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def load_weights(self, weights):\n weight = np.load(weights)\n return weight", "def load_checkpoint(checkpoint_directory,\n session):\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # filter variables if needed.\n print(variables)\n saver_ob = tf.train.Saver(variables, max_to_keep=0)\n os.makedirs(checkpoint_directory, exist_ok=True)\n # verify if we don't have a checkpoint saved directly\n step = 0\n ckpt = tf.train.get_checkpoint_state(checkpoint_directory)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n model_checkpoint_path = ckpt.model_checkpoint_path\n saver_ob.restore(session, model_checkpoint_path)\n step = int(model_checkpoint_path.rsplit('-', 1)[1])\n print('Model loaded = ', step)\n return saver_ob, step", "def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n logger.info(f'load model from: {self.pretrained}')\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n pass\n else:\n raise TypeError('pretrained must be a str or None')", "def load(self):\r\n checkpoint = torch.load(self.checkpoint_path,\r\n map_location=self.device)\r\n self.load_state_dict(checkpoint)\r\n del checkpoint", "def load_checkpoint(checkpoint_file: pl.Path) -> Optional[Dict[str, Any]]:\n if checkpoint_file.exists():\n logger.info(f\"Loading checkpoint {checkpoint_file}.\")\n checkpoint = torch.load(str(checkpoint_file))\n logger.info(f\"Done loading checkpoint from epoch {checkpoint['epoch']}.\")\n else:\n logger.warning(f\"No {checkpoint_file} checkpoint file found. 
Starting normal.\")\n return checkpoint", "def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def load_weights(self, weights):\n\n i = 0\n for l in range(1, self.num_layers()):\n for n in range(self.get_layer(l).num_nodes):\n for w in range(len(self.get_node_with_layer(l, n).weights)):\n self.get_node_with_layer(l, n).weights[w] = weights[i]\n i += 1", "def restore(self, weights_file):\r\n\r\n self.model.load_weights(weights_file, by_name=True)", "def load_nn(self, filename):\n self.weights_and_biases = (np.load(filename, allow_pickle=True)).tolist()\n print('Weights and biases are loaded')", "def load_model(self, checkpoint_path):\n model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def load_weights(self):\n try:\n print('loading weights from {}'.format(self.cfg.class_model_dir))\n self.load_state_dict(torch.load(self.cfg.class_model_dir + self.class_model_name + '.pth'))\n except Exception as e:\n print(\"load weights exception: {}\".format(e))", "def load_model_states_from_checkpoint(model, filename, tag, from_gpu=True):\r\n assert os.path.exists(filename)\r\n if from_gpu:\r\n checkpoint = torch.load(filename)\r\n else:\r\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\r\n model.load_state_dict(checkpoint[tag])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load_model(self, file_name):\n\t\tself.model.load_weights(file_name)", "def init_weights(self, pretrained=None, strict=True):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=strict, logger=logger)\n elif pretrained is not None:\n raise TypeError(f'\"pretrained\" must be a str or None. 
But received {type(pretrained)}.')", "def load_weights(self, file_path, format=None, in_order=True, skip=False):\n _load_weights(self, file_path, format, in_order, skip)", "def load_checkpoint_ram(self, checkpoint, train=True):\n # -- For all tasks, create a corresponding head, otherwise the restoring would not work due to mismatching weights -- #\n self.mh_network.add_n_tasks_and_activate(self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'],\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'])\n \n # -- Set the network to the full MultiHead_Module network to restore everything -- #\n self.network = self.mh_network\n \n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().load_checkpoint_ram(checkpoint, train)\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model", "def resnet_init_from_checkpoint_fn(checkpoint):\n logging.info('Initializing model weights from %s', checkpoint)\n assignment_map = {}\n resnet_scope = _get_resnet_scope()\n for var in contrib_framework.get_variables(\n scope=resnet_scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):\n if 'dense' not in var.op.name:\n # Remove the parent scope prefix.\n name_in_ckpt = var.op.name.replace(resnet_scope, 'resnet_model/')\n assignment_map[name_in_ckpt] = var\n tf.train.init_from_checkpoint(checkpoint, assignment_map)" ]
[ "0.76077384", "0.74392045", "0.74392045", "0.7414661", "0.7232018", "0.71317524", "0.7071087", "0.70339084", "0.700918", "0.6977498", "0.6935631", "0.691333", "0.68927085", "0.68749905", "0.68339276", "0.6816416", "0.6816042", "0.67910594", "0.6769799", "0.6769098", "0.6727852", "0.6713089", "0.6706459", "0.6683782", "0.6658891", "0.66465485", "0.6617412", "0.6616446", "0.6595445", "0.6595445", "0.65860444", "0.6582845", "0.65760124", "0.6567407", "0.6567407", "0.655", "0.654494", "0.654308", "0.65425074", "0.65225554", "0.6519562", "0.6502056", "0.64976", "0.6494225", "0.648421", "0.6479", "0.6476018", "0.64715683", "0.64661586", "0.64661586", "0.64654607", "0.64533824", "0.6452823", "0.64477986", "0.64477986", "0.64337814", "0.64327794", "0.6423933", "0.6423651", "0.6417821", "0.63964593", "0.63902766", "0.6384771", "0.63653183", "0.63635445", "0.635375", "0.63508856", "0.6348675", "0.6344284", "0.6343296", "0.6342838", "0.63309", "0.6330147", "0.6329762", "0.63064724", "0.6300289", "0.62704086", "0.62672496", "0.62668973", "0.6265366", "0.62633765", "0.6259392", "0.6257295", "0.6255254", "0.62301844", "0.6229952", "0.6226355", "0.62140304", "0.62128186", "0.6211067", "0.61976856", "0.61976856", "0.61963856", "0.6194719", "0.6172211", "0.61630845", "0.61520475", "0.61514324" ]
0.77680707
2
Lists the model's parameters.
def get_params(self):
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def getListOfParameters(self, *args):\n return _libsbml.Model_getListOfParameters(self, *args)", "def parameters(self):\n return self.model.parameters()", "def parameters(self):\n return []", "def print_model_params(model):\n for param, value in zip(model.param_names, model.parameters):\n print(\"{:0.4f}\\t{}\".format(value, param))", "def print_params(self):\n print(self._list_params())", "def _get_parameters(self) -> list:\n return self.parameters", "def get_params(self):\n return []", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def get_params(self):\n return list(self.params.values())", "def generate_parameter_list(self) -> None:\n\n # simulation parameters from model\n model_parameter_ids = np.array(self.amici_model.getParameterIds())\n write_string_array(self.f, \"/parameters/modelParameterNames\",\n model_parameter_ids)\n print(Fore.CYAN + \"Number of model parameters:\",\n len(model_parameter_ids))\n\n print(Fore.CYAN + \"Number of optimization parameters:\",\n len(self.parameter_df))\n write_string_array(self.f, \"/parameters/parameterNames\",\n self.parameter_df.index.values[\n (self.parameter_df.estimate == 1)\n & ~self.parameter_df.index.isin(\n self.amici_model.getFixedParameterIds())])\n\n self.generate_simulation_to_optimization_parameter_mapping()\n\n self.f.flush()", "def get_params_list():\n return common.QOL_PARAMS", "def get_resource_params():\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def model_parameters(self) -> Iterator[Tuple[str, torch.Tensor]]:\n return self._model.named_parameters()", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def param(self):\n return []", "def param(self):\n return []", "def print_param_names(model):\n for (param_name, param) in model.get_parameters().items():\n print(param_name, param.shape)", "def parameter_lists_for_model(self, model: AbstractPriorModel) -> List[float]:\n if self.is_path_kwargs:\n paths = model.all_paths\n else:\n paths = model.all_names\n\n return self.parameter_lists_for_paths(paths)", "def parameters(self):\n return self._params", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def get_model_params(self):\n return self._model_params", "def parameters(self) -> List[Parameter]:\n return self._parameters", "def param(self):\r\n\r\n return []", "def get_params(self):\n pass", "def parameters(self):\n return [term.parameter for term in self.terms]", "def parameters(self):\n return self.pars", "def parameters(self):\n return self._params", "def param(self):\r\n return []", "def help(cls):\n print(cls._LIST_PARAMETERS)", "def get_params(self):", "def parameters(self):\n return self.vars", "def get_params(self):\n raise NotImplementedError", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def list_param(command):\n namespace = app.main(command)\n assert namespace.command == 'lp' or namespace.command == \"listparam\"", "def get_params (self):\n return self.params", "def 
get_params (self):\n return self.params", "def parameters(self):\n return [o.parameters for o in self.obs]", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def show_params(self):\n \n return self.params[self.profile]", "def params(self):\n\t\treturn self.params_", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def parameters(self):\n pass", "def params(self):\n return self._pars", "def params(self):\n return self._params", "def params(self):\n return self._params", "def params(self):\n return self._params", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def params(self) -> List[ParamSpec]:\n return self._params", "def parameters(self):\n\n return self._parameters", "def get_params(self) -> torch.Tensor:\n params = []\n for pp in list(self.net.parameters()):\n params.append(pp.view(-1))\n return torch.cat(params)", "def get_params(self):\n\n return self.params_", "def params(self):\n params = []\n\n for item in self._definition.get('params', []):\n params.append(Parameter(**item))\n\n return params", "def get_model_params(self):\n\n results = self._model.fit()\n model_params = np.expand_dims(results.params.as_matrix(), 1)\n return model_params", "def generative_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in dir(self.generative_model):\n params.extend(list(self.generative_model.parameters()))\n params.extend(list(self.latent.generative_parameters()))\n return params", "def _list_params(self, the_list: List):\n return [p for e in the_list for p in self._params(e)]", "def printParameters(self):\n print(\"----------Model Parameters----------\")\n print(\"Initial Conv. 
Depth : \" + str(self.conv_depth))\n print(\"Number of Classes : \" + str(self.n_classes))\n print(\"Dropout : \" + str(self.dropout))\n print(\"Activation Function : Relu\")\n print(\"Input Shape : \" + str(self.input_shape))\n print(\"Batch Size : \" + str(self.batch_size))\n print(\"--------Optimizer Parameters--------\")\n print(\"Learning Rate : \" + str(self.optimizer.lr))\n print(\"Momentum : \" + str(self.optimizer.momentum))\n print(\"Initial Decay : \" + str(self.optimizer.initial_decay))", "def get(self):\n return self.params", "def print_params(self):\n s = self._list_params()+\"\\n\"\n if 'scale_params' in self.__dict__.keys():\n s += self.scale_params._list_params()+\"\\n\"\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n s += self.atmospheric_params._list_params()+\"\\n\"\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n s += self.atemperature_params._list_params()+\"\\n\"\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n s += self.oceanic_params._list_params()+\"\\n\"\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n s += self.ground_params._list_params()+\"\\n\"\n\n if 'gotemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n s += self.gotemperature_params._list_params() + \"\\n\"\n\n print(\"Qgs v0.2.8 parameters summary\")\n print(\"=============================\\n\")\n print(s)", "def get_params_iter(self):\n return []", "def get_embed_params(model) -> List:\r\n return [param for name, param in model.named_parameters() if \"embed\" in name]", "def parameters(self):\n return self.trainer_parameters", "def parameters(self):\n return self.trainer_parameters", "def prms(widget: QWidget) -> List:\n parameters = BaseTrain.prms(widget)\n return parameters", "def parameters(self):", "def params():\n raise NotImplementedError", "def get_param_names(self):\n return list(self.params.keys())", "def param_values(self):\n return self._param_values", "def get_paramnames_list(self):\n # TODO include syselem?\n\n query = \"SELECT NAME FROM %s\" % self.__schema\n with self.__connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n return [val['NAME'] for val in result]", "def get(self, *args):\n return _libsbml.ListOfParameters_get(self, *args)", "def params(self) -> Munch:\n return self._params", "def get_parameters(self):\n return self.context.params", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def parameters_names(cls):\n return cls._Parameters._fields", "def get_params(self):\n params = []\n params.append(('rows', str(self._rows)))\n if self._page > 1:\n params.append(('start', str((self._page - 1) * self._rows)))\n\n return params", "def parameters(self):\n return self._default_params", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def parameter_names(self) -> List[str]:", "def get_parList(self):\n parList = []\n for modelName in self._modelList:\n model = self.__modelDict[modelName]\n modelParDict = model.parFitDict\n for parName in modelParDict.keys():\n parList.append(modelParDict[parName][\"value\"])\n return parList" ]
[ "0.80538356", "0.75631976", "0.72960615", "0.7105871", "0.7048238", "0.6971301", "0.6963369", "0.6942364", "0.69122803", "0.6896806", "0.6824722", "0.6821565", "0.676841", "0.67298985", "0.67298985", "0.67248833", "0.6719116", "0.6717513", "0.6717513", "0.67164814", "0.6712457", "0.6694902", "0.66933364", "0.6691488", "0.66579175", "0.665548", "0.6649228", "0.66487366", "0.66479886", "0.66403294", "0.66145444", "0.6569835", "0.6568972", "0.65615726", "0.65540904", "0.6544046", "0.6527509", "0.6527509", "0.6527509", "0.6526537", "0.65207803", "0.65207803", "0.6512", "0.6503212", "0.6503212", "0.6503212", "0.6503212", "0.6503212", "0.6486162", "0.64849406", "0.64796937", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6457764", "0.64506465", "0.64479744", "0.6446706", "0.6440776", "0.6440776", "0.6440776", "0.6433494", "0.63789225", "0.6367834", "0.6361364", "0.632063", "0.6302258", "0.6289967", "0.6287379", "0.62705636", "0.6267131", "0.62598896", "0.62573093", "0.6242155", "0.6225191", "0.62155", "0.62155", "0.6212865", "0.62126464", "0.6211837", "0.61864233", "0.6184046", "0.6182091", "0.6181579", "0.61734796", "0.61494285", "0.61487633", "0.61447823", "0.61333835", "0.61279726", "0.6113043", "0.6107981", "0.61042255" ]
0.61706924
91
Exposes all the layers of the model.
def fprop(self, x):
    if x is self.x:
        return self.end_points
    else:
        with slim.arg_scope(arg_scopes_map['resnet_v2_50']()):
            net, end_points = networks_map['resnet_v2_50'](
                x, num_classes=self.num_classes,
                is_training=False, reuse=tf.AUTO_REUSE)
        return _get_updated_endpoints(end_points, 'resnet_v2_50/logits')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def layers(self): # -> LayerView:\n ...", "def build_layers(self):\n raise NotImplementedError", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def _get_layers(self) :\n \n return self._layers", "def add_bluprint_layers(self, models_dict, graph, class_names, dimensions,\n show_activation, show_constant_input):\n top_model = models_dict['top-model']\n # Get the top-model color list containing\n # the base color and the layer shades\n top_model_color = top_model[1]\n # get the model\n top_model = top_model[0]\n\n # Get the layers of the model\n layers = top_model[\"config\"][\"layers\"]\n # Loop through the layers\n for layer in layers:\n # If the layer is not a model\n if layer[\"class_name\"] != \"Model\":\n # Get the layer name\n layer_name = layer[\"name\"]\n # If label only layer's class name\n if class_names:\n # Get the layer's information\n layer_info = self.viz_utils.query_layer(layer_name,\n models_dict)\n # Get the layer's class name\n layer_class = layer_info['class_name']\n # If the layer is a a constant input layer,\n # manually specify the class name\n if layer_name.find('constant_input') != -1:\n layer_class = 'Constant Input'\n # Depending on the class name\n # find the the layer shade\n # If the layer is a constant_input layer\n # the color is black\n model_color = top_model_color[1].get(layer_class, \"black\")\n else:\n # If don't use class names for layers\n # then use the layer name from the JSON\n layer_class = layer_name\n model_color = top_model_color[0]\n\n\n # Add the node to the graph\n graph = self.viz_utils.add_nodes(layer_name, graph,\n layer_class, model_color,\n dimensions, show_constant_input)\n\n # Add Blueprint Inbound Edges\n graph = self.connect_blueprint_inbounds(models_dict,\n layer, graph,\n class_names, dimensions,\n show_activation, show_constant_input)\n else:\n # Add Softmod\n graph = self.connect_softmod_in_blueprint(models_dict,\n layer, graph, class_names,\n dimensions, show_activation, show_constant_input)\n\n return graph", "def get_all_layers(model):\n layers = []\n for l in model.layers:\n if hasattr(l, 'layers'):\n layers += get_all_layers(l)\n else:\n layers.append(l)\n return layers", "def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. 
call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return", "def layers(self):\n return self['layers']", "def layers(self) ->Optional[nn.ModuleList]:\n return self._layers", "def define_layers(self):\n\n if self.D0Flag:\n self.d = self.h\n\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n for l in range(1, self.L):\n self.layers.append(nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )) # nn.BatchNorm1d(self.h, affine=False)))\n\n self.layers.append(nn.Linear(self.h, 1))", "def layers(self, x):\n raise NotImplementedError", "def build(self):\n\n layers = GiraffeLayer.get_all_structural()\n \n for layer in layers:\n\n self.add_objects_from_layer(layer)\n\n return self", "def UpdateLayers(self):\n pass", "def run(layers):", "def getLayers(self):\n return self.__layers", "def make_feature_layers(self, config):\n raise NotImplementedError", "def layers(self, layers):\n\n self._layers = layers", "def layers(self):\r\n return self._flc.layers", "def print_layer_trainable(model_name):\n\n print('trainable : layer name')\n print('- '*30)\n for layer in model_name.layers:\n # if layer.trainable:\n print(\"{0}:\\t{1}\".format(layer.trainable, layer.name))\n \n return", "def print_layers(model):\r\n for i in range(len(model.layers)):\r\n print(\"Printing layer shape: %d\" % i, model.layers[i])\r\n weights = model.layers[i].get_weights()\r\n for weight in weights: # Layer type\r\n print(weight.shape)", "def get_trainable_layers(self):\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers", "def define_layers(self):\n if self.d != 0:\n # If we have a fixed input size we use it do define the first layer\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n else:\n self.layers = [nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )]\n\n l = 0\n for l in self.layers_sizes():\n self.layers.append(nn.Sequential(nn.Linear(self.h - l, self.h - l - self.delta_h),\n nn.ReLU(), )) # nn.BatchNorm1d( self.h - l - self.delta_h, affine=False)))\n self.layers.append(nn.Sequential(nn.Linear(self.h - l - self.delta_h, 1), nn.ReLU()))", "def __repr__(self):\n return misc.describe_layer(self, name=\"model\")", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def compile(self):\n for layer in self.layers:\n layer._Dense__load()", "def _init_layers(self) -> None:\n self.convs_all_levels = nn.ModuleList()\n for i in range(self.start_level, self.end_level + 1):\n convs_per_level = nn.Sequential()\n convs_per_level.add_module(\n f'conv{i}',\n ConvModule(\n self.in_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n inplace=False,\n bias=False))\n self.convs_all_levels.append(convs_per_level)\n\n conv_branch = []\n for _ in range(self.num_stacked_convs):\n conv_branch.append(\n ConvModule(\n self.feat_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=False))\n self.conv_branch = nn.Sequential(*conv_branch)\n\n self.conv_pred = nn.Conv2d(\n self.feat_channels, self.out_channels, 1, stride=1)", "def 
add_layers(self, layers):\n\n existing_layers = self.layers\n assert len(existing_layers) > 0\n for layer in layers:\n assert layer.get_mlp() is None\n layer.set_mlp(self)\n layer.set_input_space(existing_layers[-1].get_output_space())\n existing_layers.append(layer)\n assert layer.layer_name not in self.layer_names\n self.layer_names.add(layer.layer_name)", "def inception_model(layer_names):\n \n### START CODE HERE ###\n # Load InceptionV3 with the imagenet weights and **without** the fully-connected layer at the top of the network\n inception = tf.keras.applications.inception_v3.InceptionV3(include_top=False,weights='imagenet')\n\n # Freeze the weights of the model's layers (make them not trainable)\n inception.trainable = False\n \n # Create a list of layer objects that are specified by layer_names\n output_layers = [inception.get_layer(name).output for name in layer_names]\n\n # Create the model that outputs the content and style layers\n model = tf.keras.Model(inputs=inception.input, outputs=output_layers)\n \n # return the model\n return model", "def get_layers(model):\n layers = []\n for child in model.children():\n layer_name = child.__class__.__name__\n if layer_name in CONV_OPS:\n layers.append(Layer.from_conv(child))\n elif layer_name in UP_OPS:\n layers.append(Layer.from_up(child))\n else:\n layers.extend(get_layers(child))\n return layers", "def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)", "def __init__(self, layers):\n\n\t\tself.layers = layers", "def get_all(self):\n\n layer_names = rs.LayerNames()\n\n layers = []\n\n for layer_name in layer_names:\n\n layer = GiraffeLayer(layer_name)\n \n layers.append(layer)\n\n return layers", "def GetLayers(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_GetLayers(self, *args)", "def model_number_layers(model):\n for idx, layer in enumerate(model.layers):\n print(idx, layer.name)", "def layers(self, layers):\n self._layers = layers\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.retina_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n 3,\n padding=1)\n self.retina_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4, 3, padding=1)", "def setup_layer_structure(self):\n self.page_rank_convolution_1 = self.layer(self.feature_number, self.args.layers[0], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_2 = self.layer(self.args.layers[0], self.args.layers[1], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_3 = self.layer(self.args.layers[1], self.class_number, self.args.iterations, self.args.alpha)", "def loadLayers(self,\n layers: List[keras.layers.Layer]) -> None:\n self.__layers = layers\n\n # Model has been reset, redraw view\n self.modelReset.emit()\n return", "def update_layers(self):\n\n # Para cada layer 
atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def _init_layers(self) -> None:\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])", "def addExportLayerToCoreml(builder):\n outputNames = [output.name for output in builder.spec.description.output]\n\n for i, outputName in enumerate(outputNames):\n # formulas: https://github.com/ultralytics/yolov5/issues/471\n builder.add_activation(\n name=f\"sigmoid_{outputName}\",\n non_linearity=\"SIGMOID\",\n input_name=outputName,\n output_name=f\"{outputName}_sigmoid\",\n )\n\n ### Coordinates calculation ###\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2) -> nC = 640 / strides[i]\n builder.add_slice(\n name=f\"slice_coordinates_xy_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_xy\",\n axis=\"width\",\n start_index=0,\n end_index=2,\n )\n # x,y * 2\n builder.add_elementwise(\n name=f\"multiply_xy_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_xy\"],\n output_name=f\"{outputName}_multiplied_xy_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # x,y * 2 - 0.5\n builder.add_elementwise(\n name=f\"subtract_0_5_from_xy_{outputName}\",\n input_names=[f\"{outputName}_multiplied_xy_by_two\"],\n output_name=f\"{outputName}_subtracted_0_5_from_xy\",\n mode=\"ADD\",\n alpha=-0.5,\n )\n grid = make_grid(featureMapDimensions[i], featureMapDimensions[i]).numpy()\n # x,y * 2 - 0.5 + grid[i]\n builder.add_bias(\n name=f\"add_grid_from_xy_{outputName}\",\n input_name=f\"{outputName}_subtracted_0_5_from_xy\",\n output_name=f\"{outputName}_added_grid_xy\",\n b=grid,\n shape_bias=grid.shape,\n )\n # (x,y * 2 - 0.5 + grid[i]) * stride[i]\n builder.add_elementwise(\n name=f\"multiply_xy_by_stride_{outputName}\",\n input_names=[f\"{outputName}_added_grid_xy\"],\n output_name=f\"{outputName}_calculated_xy\",\n mode=\"MULTIPLY\",\n alpha=strides[i],\n )\n\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2)\n builder.add_slice(\n name=f\"slice_coordinates_wh_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_wh\",\n axis=\"width\",\n start_index=2,\n end_index=4,\n )\n # w,h * 2\n builder.add_elementwise(\n name=f\"multiply_wh_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_wh\"],\n output_name=f\"{outputName}_multiplied_wh_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # (w,h * 2) ** 2\n builder.add_unary(\n name=f\"power_wh_{outputName}\",\n 
input_name=f\"{outputName}_multiplied_wh_by_two\",\n output_name=f\"{outputName}_power_wh\",\n mode=\"power\",\n alpha=2,\n )\n # (w,h * 2) ** 2 * anchor_grid[i]\n anchor = (\n anchorGrid[i]\n .expand(-1, featureMapDimensions[i], featureMapDimensions[i], -1)\n .numpy()\n )\n builder.add_load_constant_nd(\n name=f\"anchors_{outputName}\",\n output_name=f\"{outputName}_anchors\",\n constant_value=anchor,\n shape=anchor.shape,\n )\n builder.add_elementwise(\n name=f\"multiply_wh_with_achors_{outputName}\",\n input_names=[f\"{outputName}_power_wh\", f\"{outputName}_anchors\"],\n output_name=f\"{outputName}_calculated_wh\",\n mode=\"MULTIPLY\",\n )\n\n builder.add_concat_nd(\n name=f\"concat_coordinates_{outputName}\",\n input_names=[f\"{outputName}_calculated_xy\", f\"{outputName}_calculated_wh\"],\n output_name=f\"{outputName}_raw_coordinates\",\n axis=-1,\n )\n builder.add_scale(\n name=f\"normalize_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_coordinates\",\n output_name=f\"{outputName}_raw_normalized_coordinates\",\n W=torch.tensor([1 / 640]).numpy(),\n b=0,\n has_bias=False,\n )\n\n ### Confidence calculation ###\n builder.add_slice(\n name=f\"slice_object_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_object_confidence\",\n axis=\"width\",\n start_index=4,\n end_index=5,\n )\n builder.add_slice(\n name=f\"slice_label_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_label_confidence\",\n axis=\"width\",\n start_index=5,\n end_index=0,\n )\n # confidence = object_confidence * label_confidence\n builder.add_multiply_broadcastable(\n name=f\"multiply_object_label_confidence_{outputName}\",\n input_names=[\n f\"{outputName}_label_confidence\",\n f\"{outputName}_object_confidence\",\n ],\n output_name=f\"{outputName}_raw_confidence\",\n )\n\n # input: (1, 3, nC, nC, 85), output: (3 * nc^2, 85)\n builder.add_flatten_to_2d(\n name=f\"flatten_confidence_{outputName}\",\n input_name=f\"{outputName}_raw_confidence\",\n output_name=f\"{outputName}_flatten_raw_confidence\",\n axis=-1,\n )\n builder.add_flatten_to_2d(\n name=f\"flatten_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_normalized_coordinates\",\n output_name=f\"{outputName}_flatten_raw_coordinates\",\n axis=-1,\n )\n\n builder.add_concat_nd(\n name=\"concat_confidence\",\n input_names=[\n f\"{outputName}_flatten_raw_confidence\" for outputName in outputNames\n ],\n output_name=\"raw_confidence\",\n axis=-2,\n )\n builder.add_concat_nd(\n name=\"concat_coordinates\",\n input_names=[\n f\"{outputName}_flatten_raw_coordinates\" for outputName in outputNames\n ],\n output_name=\"raw_coordinates\",\n axis=-2,\n )\n\n builder.set_output(\n output_names=[\"raw_confidence\", \"raw_coordinates\"],\n output_dims=[(25200, numberOfClassLabels), (25200, 4)],\n )", "def predict_visualize_layers(self, X):\n\n if isinstance(X, np.ndarray):\n X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1))\n elif isinstance(X, tf.data.Dataset):\n X = X.map(self._reshape)\n\n for x, y in X:\n x = tf.expand_dims(x, 0)\n\n plt.title(\"Test Sample Input\")\n plt.grid(False)\n plt.imshow(x[0, :, :, 0], aspect='auto', cmap='plasma', origin='lower')\n plt.colorbar()\n plt.show()\n\n layer_outputs = [layer.output for layer in self.model.layers]\n visualisation_model = tf.keras.models.Model(inputs=self.model.input, outputs=layer_outputs)\n\n visualisations = visualisation_model.predict(x)\n\n images_per_row = 4\n\n for layer_name, 
layer_activation in zip(map(lambda x : x.name, layer_outputs[:3]), visualisations[:3]):\n n_features = layer_activation.shape[-1]\n size = layer_activation.shape[1:3]\n n_cols = n_features // images_per_row\n grid = np.zeros((size[0] * n_cols, images_per_row * size[1]))\n\n for col in range(n_cols):\n for row in range(images_per_row):\n channel_image = layer_activation[0, :, :, col * images_per_row + row]\n channel_image -= channel_image.mean()\n channel_image /= channel_image.std()\n channel_image *= 64\n channel_image += 128\n channel_image = np.clip(channel_image, 0, 255).astype('uint8')\n grid[col * size[0]: (col + 1) * size[0], row * size[1]: (row + 1) * size[1]] = channel_image\n\n plt.figure(figsize=(1. / size[0] * grid.shape[1], 3. / size[1] * grid.shape[0]))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(grid, aspect='auto', cmap='plasma', origin='lower')\n plt.colorbar()\n plt.show()\n\n pred = np.argmax(visualisations[-1])\n print(f\"Predicted class: {Genre(pred)} with probability {visualisations[-1][0][pred]}\\n\"\n + f\"Actual class: {Genre(y)}\")", "def vgg_layers(layer_names):\n # Load our model. Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n vgg.trainable = False\n \n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model", "def transformer_layers(self):\n return self._transformer_layers", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)", "def hidden_layers(self):\n\t\tif self.hidden is None:\n\t\t\tself.hidden, self.inputs, self.weights_all, self.biases_all = [], [], [], []\n\t\t\tlast_hidden = self.x\n\t\t\tif self.covnet == 1:\n\t\t\t\ty_conv, self._drouput, self.hidden, self.inputs = deepnn(self.x)\n\t\t\telif self.covnet == 2:\n\t\t\t\ty_c, self.hidden, self.inputs = multi_layer_perceptron(self.x, self.input_size, self.num_of_classes,\n\t\t\t\t self.layerSize[0], self.layerSize[1])\n\t\t\telse:\n\n\t\t\t\tself._drouput = 'dr'\n\t\t\t\t# self.hidden.append(self.x)\n\t\t\t\tfor i in range(1, len(self.all_layer_sizes)):\n\t\t\t\t\tname_scope = 'hidden' + str(i - 1)\n\t\t\t\t\trow_size, col_size = self.all_layer_sizes[i - 1], self.all_layer_sizes[i]\n\t\t\t\t\tactivation_function = self.activation_function\n\t\t\t\t\tlast_hidden = self.initilizae_layer(name_scope, row_size, col_size, activation_function,\n\t\t\t\t\t last_hidden)\n\t\t\t\tname_scope = 'final_layer'\n\t\t\t\trow_size, col_size = self.layerSize[-1], self.num_of_classes\n\t\t\t\tactivation_function = None\n\t\t\t\tlast_hidden = self.initilizae_layer(name_scope, row_size, col_size, activation_function, last_hidden)\n\t\treturn self.hidden", "def convert_layers(model):\n\n import logging\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n for name, module in model._modules.items():\n if len(list(module.children())) > 0:\n model._modules[name] = convert_layers(model=module)\n try:\n module_str = str(module)\n module_new = eval(module_str)\n try:\n module_new.weight = module.weight\n module_new.bias = module.bias\n except:\n pass\n model._modules[name] = module_new\n logger.info(\"Quantizing \" + str(name) + \" \" + 
str(module))\n except:\n pass\n return model", "def keras_add_layers(model, num_classes, keep_prob):\n # DONE: Implement function\n\n # See also lesson \"FCN-8 Decoder\" for structure, and Long_Shelhamer paper\n\n # Walkthrough video started with 1x1 convolution like this, but notes explained\n # that was already done for us (loaded model is not ordinary VGG but already\n # adapted for FCN). In fact the VGG network provided looks very much like\n # the one generated by the Single-Shot Detector caffe code, so I guess they\n # share some common heritage.\n #conv_1x1 = tf.layers.conv2d(vgg_layer7_out, # at/near end of VGG\n # num_classes, # just road/nonroad for us\n # 1, # as 1x1 conv\n # padding='same',\n # kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3))\n\n # Using Tensorboard to visualise the structure of the Udacity VGG model provided, and\n # tf.trainable_variables() to list the dimensions and sizes of the weights and biases\n # for each layer, I arrive at this summary of what shape the output of each layer\n # is (knowing that we started with a 160 height x 576 width x 3 colour channel image).\n # All of the convolution layers have SAME padding and [1,1,1,1] strides so they\n # don't reduce the x-y pixel size. All the pooling layers have [1,2,2,1] strides so\n # they halve the pixel size. I'm ignoring the first dimension (across images), as\n # everything works on one image at a time.\n #\n # Layer name Details Output dimensions\n # <input> raw image 160x576x3\n # conv1_1 conv2d 3x3x3x64, Relu 160x576x64\n # conv1_2 conv2d 3x3x64x64, Relu 160x576x64\n # pool1 pool [1,2,2,1] 80x288x64\n # conv2_1 conv2d 3x3x64x128, Relu 80x288x128\n # conv2_2 conv2d 3x3x128x128, Relu 80x288x128\n # pool2 pool [1,2,2,1] 40x144x128\n # conv3_1 conv2d 3x3x128x256, Relu 40x144x256\n # conv3_2 conv2d 3x3x256x256, Relu 40x144x256\n # conv3_3 conv2d 3x3x256x256, Relu 40x144x256\n # pool3 pool [1,2,2,1] 20x72x256 --> layer3_out\n # conv4_1 conv2d 3x3x256x512, Relu 20x72x512\n # conv4_2 conv2d 3x3x512x512, Relu 20x72x512\n # conv4_3 conv2d 3x3x512x512, Relu 20x72x512\n # pool4 pool [1,2,2,1] 10x36x512 --> layer4_out\n # conv5_1 conv2d 3x3x512x512, Relu 10x36x512\n # conv5_2 conv2d 3x3x512x512, Relu 10x36x512\n # conv5_3 conv2d 3x3x512x512, Relu 10x36x512\n # pool5 pool [1,2,2,1] 5x18x512\n # fc6 conv2d 7x7x512x4096, Relu 5x18x4096\n # dropout dropout(keep_prob) 5x18x4096\n # fc7 conv2d 1x1x4096x4096, Relu 5x18x4096\n # dropout_1 dropout(keep_prob) 5x18x4096 --> layer7_out\n # layer8 conv2d_t 10x36\n\n layer3_out = model.get_layer('block3_pool').output\n layer4_out = model.get_layer('block4_pool').output\n\n # Problem here: TF2 library model doesn't have image-shaped layers 6 & 7 like\n # model provided originally with TF1, but instead is flattened amporphous classifier.\n # So we're working with more 'raw' layer as input. 
TODO should add back\n # two conv2d layers before this to be like the original\n drop_prob = 1.0 - keep_prob\n\n layer5 = model.get_layer('block5_pool')\n\n layer6_conv = tf.keras.layers.Conv2D(4096,\n 7, # 7x7 patch from original Udacity model\n strides=(1,1),\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)), # guess same as others\n name='layer6_conv')\n\n layer6_dropout = tf.keras.layers.Dropout(drop_prob, name=\"layer6_dropout\")\n\n layer7_conv = tf.keras.layers.Conv2D(4096,\n 1, # 1x1 patch from original Udacity model\n strides=(1,1),\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)), # guess\n name='layer7_conv')\n\n layer7_dropout = tf.keras.layers.Dropout(drop_prob, name=\"layer7_dropout\")\n\n # Connect up the new layers\n x = layer6_conv(layer5.output)\n x = layer6_dropout(x)\n x = layer7_conv(x)\n layer7 = layer7_dropout(x)\n\n # Create a new model\n mod_model = tf.keras.Model(inputs=model.input, outputs=layer7)\n\n # We should now have the same structure as the original Udacity version of VGG16,\n # but still need to add the decoder and skip connections as before\n\n # Upsample by 2. We need to work our way down from a kernel depth of 4096\n # to just our number of classes (i.e. 2). Should we do this all in one go?\n # Or keep more depth in as we work upwards? For now doing it all in one hit.\n layer8 = tf.keras.layers.Conv2DTranspose(num_classes, #filters, \n 4, # kernel size taken from classroom example, might experiment\n strides=2, # stride causes upsampling\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer8')\n\n # Now we're at 10x36x2 so we have same pixel resolution as layer4_out. Can't directly add\n # in layer4_out because it has filter depth of 512. (Though we could have had our transpose\n # convolution only downsample to 512 for compatibility... might try that later)\n\n # Squash layer4 output with 1x1 convolution so that it has compatible filter depth (i.e. 
num_classes)\n layer4_squashed = tf.keras.layers.Conv2D(num_classes, # new number of filters,\n 1, # 1x1 convolution so kernel size 1\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer4_squashed')\n # upsample by 2\n layer9 = tf.keras.layers.Conv2DTranspose(num_classes, # filters\n 4, # kernel size taken from classroom example\n strides=(2,2), # stride causes upsampling\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer9')\n\n # Now we're at 20x72x2 so same pixel resolution as layer3_out, but need to squash that from\n # 256 filters to 2 (num_classes) before we can add it in as skip connection\n layer3_squashed = tf.keras.layers.Conv2D(num_classes, # new number of filters\n 1, # 1x1 convolution so kernel size 1\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer3_squashed')\n\n # upsample by 8 to get back to original image size\n layer10 = tf.keras.layers.Conv2DTranspose(num_classes,\n 32, # Finding quite large kernel works nicely\n strides=(8,8), # stride causes upsampling\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer10')\n\n # so now we should be at 160x576x2, same as original image size, 2 classes\n\n # Connect the layers\n x1 = layer8(layer7)\n x2 = layer4_squashed(layer4_out)\n\n # now we can add skip layer of this dimension taken from corresponding encoder layer\n layer8_plus_layer4 = tf.keras.layers.add([x1, x2], name='layer8_plus_layer4')\n #layer8_plus_layer4 = tf.add(layer8, layer4_squashed, name='layer8_plus_layer4')\n\n x1 = layer9(layer8_plus_layer4)\n x2 = layer3_squashed(layer3_out)\n\n # now we can add skip layer of this dimension taken from corresponding encoder layer\n layer9_plus_layer3 = tf.keras.layers.add([x1, x2], name='layer9_plus_layer3')\n #layer9_plus_layer3 = tf.add(layer9, layer3_squashed, name='layer9_plus_layer3')\n\n predictors = layer10(layer9_plus_layer3) # layer 10 should be same size as image\n\n # Create a new model\n mod_model = tf.keras.Model(inputs=model.input, outputs=predictors)\n print(\"Model after adding decoder layers:\")\n mod_model.summary()\n\n return mod_model", "def iteration_layers(model, speedup, session, indepth_layer=None):\n if speedup is True:\n layer_names_reduced = ['conv2d1',\n 'conv2d2',\n 'mixed3b',\n 'mixed4b',\n 'mixed5b']\n layer_tensors = [session.graph.get_tensor_by_name(name + \":0\") for name in layer_names_reduced]\n else:\n layer_tensors = model.layer_tensors\n\n return layer_tensors", "def layers(self, layers):\n self._layers = layers\n self.thetas = []\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer\n self.thetas.extend(layer.thetas())", "def viewOnFlatLayer(layer, dimensions, name = None):\n assert max(dimensions) > 1, \"At least one dimension needs to be larger than one.\"\n def slicer():\n nbunits = reduce(lambda x, y: x*y, dimensions, 1)\n insize = layer.indim // nbunits\n outsize = layer.outdim // nbunits\n for index in range(nbunits):\n yield ModuleSlice(layer, insize*index, insize*(index+1), outsize*index, outsize*(index+1))\n c = slicer()\n return ModuleMesh(lambda: next(c), dimensions, name)", "def num_layers(self): # -> int:\n ...", "def build_model(self):\n self.model = Sequential()\n # print self.layers[0].identifier\n # print self.layers[0].parameters\n for layer in self.layers:\n # print layer.identifier\n # print layer.parameters\n 
self.model.add(layer.toKerasFn())\n\n\n # super(SequentialModelWrapper, self).compile(optimizer=self.optimizer.toKerasFn(),\n # loss=self.loss,\n # metrics=self.metrics)\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics)", "def __call__(cls, *args, **kwargs):\n layer = super(LayerAspect, cls).__call__(*args, **kwargs)\n\n if Job.Current:\n Job.Current.addLayer(layer)\n \n layer.afterInit()\n return layer", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(2)\n ]\n self.norms = ModuleList(norms_list)", "def build_modules(self):\n self.backbone = Backbone(\n self.configs['backbone'],\n freeze_backbone=self.configs['freeze_backbone'],\n freeze_batchnorm=True\n )\n\n backbone_channel_sizes = get_backbone_channel_sizes(self.backbone)\n\n self.fpn = FeaturePyramidNetwork(\n backbone_channel_sizes=backbone_channel_sizes,\n min_feature_level=self.configs['min_feature_level'],\n max_feature_level=self.configs['max_feature_level'],\n feature_size=self.configs['pyramid_feature_size']\n )\n\n self.shared_conv_model = SharedConvModel(\n input_feature_size=self.configs['pyramid_feature_size'],\n feature_size=self.configs['shared_conv_feature_size'],\n num_layers=self.configs['shared_conv_num_layers']\n )\n\n if self.configs['shared_conv_num_layers'] > 0:\n shared_conv_output_size = self.configs['shared_conv_feature_size']\n else:\n shared_conv_output_size = self.configs['pyramid_feature_size']\n\n self.ofn = ObjectFinderNetwork(\n input_feature_size=shared_conv_output_size,\n feature_size=self.configs['finder_feature_size'],\n num_layers=self.configs['finder_num_layers']\n )\n\n self.ofn_loss_fn\n\n # self.classification_model = ClassificationModel()\n #\n # self.regression_model = RegressionModel()", "def __init__(self, model):\n self.output_weights = model.get_layer(\"output\").get_weights()[0]\n self.cam_model = Model(inputs=model.input, outputs=(model.get_layer(\"activation\").output, model.get_layer(\"output\").output))", "def layers(self):\n\n if not self.last_node:\n return []\n return nuke.layers(self.last_node)", "def summary(self):\n for i,layer in enumerate(self.chain):\n x = Input([2])\n y = layer.forward(x)\n Model(x,y,name=f'layer_{i}_summary').summary()", "def handle_layers(context, model, toplayer, layerids, materials, update, import_hidden=False):\n #setup main container to hold all layer collections\n layer_col_id=\"Layers\"\n if not layer_col_id in context.blend_data.collections:\n layer_col = context.blend_data.collections.new(name=layer_col_id)\n try:\n toplayer.children.link(layer_col)\n except Exception:\n pass\n else:\n #If \"Layers\" collection is in place, we assume the plugin had imported 3dm before\n layer_col = context.blend_data.collections[layer_col_id]\n\n # build lookup table for LayerTable index\n # from GUID, create collection for each\n # layer\n for lid, l in enumerate(model.Layers):\n if not l.Visible and not import_hidden:\n continue\n lcol = utils.get_iddata(context.blend_data.collections, l.Id, l.Name, None)\n layerids[str(l.Id)] = (lid, lcol)\n utils.tag_data(layerids[str(l.Id)][1], l.Id, l.Name)\n '''\n matname = l.Name + \"+\" + str(l.Id)\n if matname not in materials:\n laymat = utils.get_iddata(context.blend_data.materials, l.Id, l.Name, None)\n if update:\n\t laymat.use_nodes = True\n\t r, g, b, _ = 
l.Color\n\t principled = PrincipledBSDFWrapper(laymat, is_readonly=False)\n\t principled.base_color = (r/255.0, g/255.0, b/255.0)\n materials[matname] = laymat\n '''\n # second pass so we can link layers to each other\n for l in model.Layers:\n # link up layers to their parent layers\n if str(l.ParentLayerId) in layerids:\n parentlayer = layerids[str(l.ParentLayerId)][1]\n try:\n parentlayer.children.link(layerids[str(l.Id)][1])\n except Exception:\n pass\n # or to the top collection if no parent layer was found\n else:\n try:\n layer_col.children.link(layerids[str(l.Id)][1])\n except Exception:\n pass", "def mini_model(self):\n with tf.variable_scope(name_or_scope='human2d_network'):\n # down-sampling\n resi_0 = res_layer(self._input, filters=16, strides=2, kernel_size=7, training=self.training, name='resi_0')\n resi_1 = res_layer(resi_0, filters=32, strides=1, kernel_size=3, training=self.training, name='resi_1')\n pool_0 = max_pool_layer(resi_1, name='pool_0')\n resi_2 = res_layer(pool_0, filters=32, strides=1, kernel_size=3, training=self.training, name='resi_2')\n # hourglass module\n resi_3 = res_layer(resi_2, filters=64, strides=1, kernel_size=3, training=self.training, name='resi_3')\n hrgs_0 = hourglass_layer(resi_3, training=True, name='hrgs_0')\n # keypoint output\n keypoint_pre_0 = res_layer(hrgs_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_0')\n keypoint_pre_1 = res_layer(keypoint_pre_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_1')\n keypoint_pre_2 = res_layer(keypoint_pre_1, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_2')\n keypoint_output_raw = res_layer(keypoint_pre_2, filters=14, strides=1, kernel_size=1,\n training=self.training, bottleneck=False, name='keypoint_output_raw')\n keypoint_output = tf.nn.sigmoid(x=keypoint_output_raw, name='keypoint_output')\n # silhouette output\n silhouette_pre_0 = res_layer(hrgs_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_0')\n silhouette_pre_1 = res_layer(silhouette_pre_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_1')\n silhouette_pre_2 = res_layer(silhouette_pre_1, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_2')\n silhouette_output_raw = res_layer(silhouette_pre_2, filters=2, strides=1, kernel_size=1,\n training=self.training, bottleneck=False, name='silhouette_output_raw')\n silhouette_output = tf.nn.softmax(logits=silhouette_output_raw, name='silhouette_output')\n # return\n return None, None, keypoint_output, silhouette_output", "def get_all_structural(self):\n\n layer_names = rs.LayerNames()\n\n layers = []\n\n for layer_name in layer_names:\n\n layer = GiraffeLayer(layer_name)\n \n if layer.is_structural():\n\n layers.append(layer)\n\n # sort layers to make sure numbered nodes are added first and to maintain regular order\n layers.sort(key = lambda x: x.to_int())\n\n return layers", "def visualize_conv_layers(self, layer_name='conv1', savefig_path=\"\"):\n\n # The name of the layer we want to visualize\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == layer_name][0]\n\n # Visualize all filters in this layer.\n filters = np.arange(get_num_filters(self.model.layers[layer_idx]))\n\n # Generate input image for each filter. 
Here `text` field is used to overlay `filter_value` on top of the image.\n vis_images = []\n for idx in filters:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n\n # Generate stitched image palette with 8 cols.\n stitched = utils.stitch_images(vis_images, cols=8)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(layer_name)\n plt.savefig(savefig_path)\n\n print('debug')", "def vgg_layers(layer_names):\n # Load our model. Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n plot_model(vgg, 'vgg19_diagram.png')\n vgg.trainable = False\n\n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model", "def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()", "def print_layer_io_shapes(model):\n for i, _ in enumerate(model.layers):\n print(\"layer {} input: \".format(i), model.layers[i].input_shape)\n print(\"layer {} output:\".format(i), model.layers[i].output_shape)", "def getVisibilityLayers(self):\n return self._VisibilityLayers", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def visualize_model(self, ax):\n ax.imshow(self.w[1:].reshape(28, -1, order='F').T, cmap='bone')", "def __call__(self):\n custom_obj = {'tf': tf, 'relu6': tf.nn.relu6}\n wfile = self._get_model_weights()\n model = tf.keras.models.load_model(wfile, custom_objects=custom_obj)\n\n if not self._trainable:\n # freeze encoder layers up to\n # expanded_conv_16_project_BN\n for layer in model.layers[1:147]:\n layer.trainable = False\n\n return model", "def unfreeeze_all_layers(self):\n # Unfreeeze\n logger.info('MODEL: Unfreeze all layers.')\n for i in range(len(self.model.layers)):\n self.model.layers[i].trainable = True\n \n # Compile model\n logger.info('MODEL: Compiling...')\n self.model.compile(optimizer = Adam(lr=1e-4),\n loss={'yolo_loss': lambda y_true, y_pred: y_pred})", "def list_layers(service):\n r = _post(service)\n if 'layers' in r:\n return [layer(p) for p in r['layers']]\n return", "def _set_freeze_layers(self):\n for layer in self.encoder.layers[:self.freeze_layers]:\n layer.trainable = False", "def ApplyInputs(ss, en):\n ss.Net.InitExt()\n\n lays = [\"Input\", \"Output\"]\n for lnm in lays :\n ly = leabra.Layer(ss.Net.LayerByName(lnm))\n pats = en.State(ly.Nm)\n if pats != 0:\n ly.ApplyExt(pats)", "def filesystem_layers(self):\n pass", "def summary(self, verbose=False):\n for i, layer in enumerate(self._layers):\n print('%d: %s' % (i, str(layer)))\n if verbose:\n print('weights:', layer.get_weights())\n if layer._use_bias:\n print('bias:', layer._bias)\n print()", "def initialize_layers(self, layers_config: dict, inputs=None):\n layers_config = layers_config.copy()\n input_lyrs = []\n initiated_layers = OrderedDict()\n wrp_layer = None # indicator for wrapper layers\n first_layer = True\n\n for lyr, lyr_args in layers_config.items():\n\n lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)\n\n lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)\n\n if K.BACKEND == 'pytorch':\n\n if first_layer:\n first_layer = False\n\n if callable(lyr_config):\n 
lyr_initiated = lyr_config\n else:\n lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)\n setattr(self, lyr, lyr_initiated)\n initiated_layers[lyr] = {\"layer\": lyr_initiated, \"named_outs\": named_outs, 'call_args': call_args,\n 'inputs': lyr_inputs}\n\n else:\n # may be user has defined layers without input layer, in this case add Input layer as first layer\n if first_layer:\n if inputs is not None: # This method was called by providing it inputs.\n assert isinstance(inputs, tf.Tensor)\n # since inputs have been defined, all the layers that will be added will be next to first layer\n first_layer = False\n layer_outputs = inputs\n initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}\n\n elif lyr_name != \"Input\":\n if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer\n initialized_layer = LAYERS[\"Input\"](shape=lyr_config['input_shape'])\n else:\n # for simple dense layer based models, lookback will not be used\n def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)\n initialized_layer = LAYERS[\"Input\"](shape=def_shape)\n\n # first layer is built so next iterations will not be for first layer\n first_layer = False\n # put the first layer in memory to be used for model compilation\n # add th layer which the user had specified as first layer\n initiated_layers[initialized_layer.name] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n\n # The inputs to the layer have not been specified, so either it is an Input layer\n if lyr_inputs is None:\n # or it uses the previous outputs as inputs\n if lyr_name == \"Input\":\n # it is an Input layer, hence should not be called\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n else:\n # it is executable and uses previous outputs as inputs\n if lyr_name in ACTIVATION_LAYERS:\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n # lyr_config is serialized lambda layer, which needs to be deserialized\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n # layers_config['lambda']['config'] still contails lambda, so we need to replace the python\n # object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n if lyr_name == \"TemporalFusionTransformer\":\n lyr_config['return_attention_components'] = True\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n 
initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n else: # The inputs to this layer have been specified so they must exist in lyr_cache.\n # it is an executable\n if lyr_name in ACTIVATION_LAYERS:\n\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n layer_initialized = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': layer_initialized,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n if activation is not None: # put the string back to dictionary to be saved in config file\n lyr_config['activation'] = activation\n\n first_layer = False\n\n self.jsonize_lyr_config(lyr_config)\n\n # inputs = [] todo, indentify input layers\n # for k,v in lyr_cache.items():\n # since the model is not build yet and we have access to only output tensors of each list, this is probably\n # # the only way to know that how many `Input` layers were encountered during the run of this method. 
Each\n # tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if k.upper() != \"TIMEDISTRIBUTED\" and hasattr(v, 'op'):\n # if hasattr(v.op, 'inputs'):\n # _ins = v.op.inputs\n # if len(_ins) == 0:\n # inputs.append(v)\n # else: # not sure if this is the proper way of checking if a layer receives an input or not!\n # if hasattr(v, '_keras_mask'):\n # inputs.append(v)\n\n setattr(self, 'initiated_layers', initiated_layers)\n setattr(self, 'input_lyrs', input_lyrs)\n\n\n # todo,\n # # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use\n # # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if len(layer_outputs.op.inputs) < 1:\n # print(\"Warning: the output is of Input tensor class type\")\n # else:\n # if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node\n # print(\"Warning: the output is of Input tensor class type\")\n\n # outs = None\n #if BACKEND == 'tensorflow':\n # outs = self.call(input_lyrs)\n # setattr(self, 'output_lyrs', outs)\n # if BACKEND == 'tensorflow':\n # ## Reinitial\n # super(Model, self).__init__(\n # inputs=input_lyrs,\n # outputs=outs)\n #MODEL.__init__(self, inputs=inputs, outputs=outs)\n\n return input_lyrs # , outs", "def __init__(self,inputSize,outputSize, *args, **kwds):\n #currently the code is only for 2 hidden layers, apart from in and out\n self._saveFile = kwds.get('saveFile')\n self._inputSize = inputSize\n self._outputSize= outputSize\n self._layer1 = keras.layers.Dense(128,activation='relu')\n self._layer2 = keras.layers.Dense(64,activation='relu') \n self._layer3 = keras.layers.Dense(128,activation='relu')\n self._piLayer = keras.layers.Dense(self._outputSize-1,activation='softmax')\n self._zLayer = keras.layers.Dense(1,activation='tanh')\n self._inputs = keras.Input(shape=(self._inputSize,)) #returns placeholder\n x = self._layer1(self._inputs)\n x = self._layer2(x)\n x = self._layer3(x)\n self._outPi = self._piLayer(x)\n self._outZ = self._zLayer(x)\n self._output = keras.layers.concatenate([self._outPi,self._outZ],axis = -1)\n self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n# self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n self._model.compile(optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.99, beta_2=0.999, epsilon=1e-10, decay=0.0001),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n self._epochSize = 256", "def _build(self):\n with tf.variable_scope (self.name + '_architecutre') as scope:\n images_square = unflatten_layer ( self.images )\n visualize_images(images_square)\n\n # Conv Layer 1\n conv1_out, params = conv_2d_layer ( input = images_square,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'enc_conv_1',\n visualize = True )\n process_params(params, name = self.name)\n e1_params = params\n pool1_out = max_pool_2d_layer ( input = conv1_out, name = 'enc_pool_1')\n # lrn1_out = local_response_normalization_layer (pool1_out, name = 'lrn_1' )\n\n # Conv Layer 2\n conv2_out, params = conv_2d_layer ( input = pool1_out,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 'enc_conv_2' )\n process_params(params, name = self.name)\n e2_params = params\n pool2_out = max_pool_2d_layer ( input = conv2_out, 
name = 'enc_pool_2')\n # lrn2_out = local_response_normalization_layer (pool2_out, name = 'lrn_2' )\n\n flattened = flatten_layer(pool2_out)\n\n # Dropout Layer 1 \n flattened_dropout = dropout_layer ( input = flattened,\n prob = self.dropout_prob,\n name = 'enc_dropout_1') \n\n # Dot Product Layer 1\n fc1_out, params = dot_product_layer ( input = flattened_dropout,\n neurons = HIDDEN_1,\n name = 'enc_dot_1')\n process_params(params, name = self.name)\n e3_params = params \n\n # Dropout Layer 2 \n fc1_out_dropout = dropout_layer ( input = fc1_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_2')\n # Dot Product Layer 2\n fc2_out, params = dot_product_layer ( input = fc1_out_dropout, \n neurons = HIDDEN_2,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n e4_params = params \n\n # Dropout Layer 3 \n fc2_out_dropout = dropout_layer ( input = fc2_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_3')\n \n # Dot Product Layer 2\n self.codeword, params = dot_product_layer ( input = fc2_out_dropout, \n neurons = CODEWORD_LENGTH,\n activation = CODE_ACTIVATION,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n process_codeword_normalization_regularizer(self.codeword, \n coeff = AUTOENCODER_CODEWORD_COEFF,\n name = self.name)\n e5_params = params \n # tf.summary.histogram('codewords', self.codeword)\n # self.hash = threshold_layer ( input = self.codeword,\n # name = 'hash')\n # process_hash_regularizer(self.codeword, coeff = AUTOENCODER_HASH_COEFF,\n # name = self.name)\n\n # Decoder ... \n decoder_1_out, params = dot_product_layer ( input = self.codeword, \n neurons = HIDDEN_2,\n params = [tf.transpose(e5_params[0]), None],\n name = 'decoder_dot_1')\n d1_params = params\n process_params([params[1]], name = self.name)\n \n dec_1_out_dropout = dropout_layer ( input = decoder_1_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_1')\n\n decoder_2_out, params = dot_product_layer ( input = dec_1_out_dropout, \n neurons = HIDDEN_1,\n params = [tf.transpose(e4_params[0]), None],\n name = 'decoder_dot_2')\n d2_params = params\n process_params([params[1]], name = self.name)\n \n # dropout 2\n dec_2_out_dropout = dropout_layer ( input = decoder_2_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_2')\n\n decoder_3_out, params = dot_product_layer ( input = dec_2_out_dropout, \n neurons = 1250,\n params = [tf.transpose(e3_params[0]), None],\n name = 'decoder_dot_3')\n d3_params = params\n process_params([params[1]], name = self.name)\n\n # DeConv Layer 1\n # The output shapes need to be changed according to architecture.\n\n dec_3_square = unflatten_layer ( decoder_3_out, channels = CONV_2_N )\n upsample_1 = upsampling_layer (dec_3_square, size = (10,10), name = 'dec_upsampling_1')\n\n deconv1_out, params = deconv_2d_layer ( input = upsample_1,\n neurons = CONV_1_N,\n filter_size = CONV_2_FILT,\n output_shape = (12,12),\n # n_outs = MINI_BATCH_SIZE,\n stride = (1,1,1,1), \n params = [e2_params[0], None], \n name = 'dec_deconv_1' )\n\n process_params([params[1]], name = self.name)\n d4_params = params\n\n # DeConv Layer 2\n upsample_2 = upsampling_layer (deconv1_out, size = (24,24), name = 'dec_upsampling_2')\n decoded_images_square, params = deconv_2d_layer ( input = upsample_2,\n neurons = 1,\n filter_size = CONV_1_FILT,\n stride = (1,1,1,1),\n output_shape = (28,28),\n # n_outs = MINI_BATCH_SIZE, \n params = [e1_params[0], None], \n activation = 'tanh', \n name = 'dec_deconv_2' )\n \n process_params([params[1]], name = self.name)\n d5_params = params \n \n 
self.decoded = flatten_layer (decoded_images_square, in_shp = [-1, 28, 28, 1])\n visualize_images(decoded_images_square, name = 'decoded')\n # This is because transpose don't initialize.\n self.params = [ [e5_params[0], d1_params[1] ],\n [e4_params[0], d2_params[1] ],\n [e3_params[0], d3_params[1] ],\n [e2_params[0], d4_params[1] ],\n [e1_params[0], d5_params[1] ] ]\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + '_decoder_error') as scope:\n reconstruction_error = rmse(self.images, self.decoded) \n tf.add_to_collection( self.name + '_objectives', reconstruction_error ) \n tf.summary.scalar('reconstruction_error', reconstruction_error)\n\n self._cook_optimizer( \n lr = AUTOENCODER_LR, \n optimizer = AUTOENCODER_OPTIMIZER,\n l1_coeff = AUTOENCODER_L1_COEFF,\n l2_coeff = AUTOENCODER_WEIGHT_DECAY_COEFF)", "def common_layers_with_encoder(self):\n return [\n self.self_attention, self.self_attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_layer_norm\n ]", "def neural_net(self, layers):\n model = nn.Sequential()\n for l in range(0, len(layers) - 1):\n model.add_module(\"layer_\"+str(l), nn.Linear(layers[l],layers[l+1], bias=True))\n if l != len(layers) - 2:\n model.add_module(\"tanh_\"+str(l), nn.Tanh())\n\n return model", "def add_layer(self, freeze = True, add = True):\n if add:\n self.num_layers += 1\n if self.conv_dim == 1:\n new_cnn = layers.Conv1D(self.n_filters,\n (self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0], self.n_filters),\n padding=\"same\",\n name='cnn_1d_{}'.format(self.num_layers-1),\n kernel_initializer = initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n elif self.conv_dim == 2:\n new_cnn = layers.Conv2D(self.n_filters,\n (self.n_kernels, self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n padding=\"same\",\n name='cnn_2d_{}'.format(self.num_layers-1),\n kernel_initializer=initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n self.list_cnn.append(new_cnn)\n\n if freeze:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = False\n else:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = True", "def _get_layers(self):\n from keras.engine.topology import InputLayer\n\n layer_names = [layer.name for layer in self._model.layers[:-1] if not isinstance(layer, InputLayer)]\n logger.info('Inferred %i hidden layers on Keras classifier.', len(layer_names))\n\n return layer_names", "def vgg_layers(layer_names):\n # Load our model. 
Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n vgg.trainable = False\n\n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model", "def __init__(self): #initializing\n super(Model, self).__init__()\n self.linear = torch.nn.Linear(3, 1) # one input/feature , one output\n # here where other NN layers are added", "def vis_layer(model, layer, channel):\n num_channels = dla_lucid.LAYERS[layer][1]\n all_vis = []\n for i in range(num_channels):\n if channel is True:\n vis = vis_channel(model, layer, i)\n else:\n vis = vis_neuron(model, layer, i)\n all_vis.append(vis)\n\n all_vis_array = np.array(all_vis)\n return all_vis_array", "def setup_to_transfer_learn(model):\n for layer in model.layers:\n layer.trainable = False\n\n #model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])", "def _init_layers(self) -> None:\n super()._init_layers()\n self.controller = nn.Conv2d(\n self.feat_channels, self.num_params, 3, padding=1)", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. 
for _ in range(len(self.prior_generator.strides))\n ]", "def _init_layers(self) -> None:\n weight_nums, bias_nums = [], []\n for i in range(self.num_layers):\n if i == 0:\n weight_nums.append((self.in_channels + 2) * self.feat_channels)\n bias_nums.append(self.feat_channels)\n elif i == self.num_layers - 1:\n weight_nums.append(self.feat_channels * 1)\n bias_nums.append(1)\n else:\n weight_nums.append(self.feat_channels * self.feat_channels)\n bias_nums.append(self.feat_channels)\n\n self.weight_nums = weight_nums\n self.bias_nums = bias_nums\n self.num_params = sum(weight_nums) + sum(bias_nums)", "def LayerAddflatten(bottom_model, num_classes):\n top_model = bottom_model.output\n top_model = Flatten(name = \"flatten\")(top_model)\n top_model = Dense(526, activation = \"relu\")(top_model)\n top_model = Dense(263, activation = \"relu\")(top_model)\n top_model = Dense(num_classes, activation = \"sigmoid\")(top_model)\n return top_model", "def _export_model(self):\n graph = ComputeGraph.from_onnx(self.onnx_model.graph)\n\n print(\"Running constant propagation\")\n constant_states = constant_propagation(graph)\n\n self._remove_constants(graph, constant_states)\n self._remove_nops(graph, constant_states)\n\n # Add shape information from constant propagation:\n for var, res in constant_states.items():\n if var in graph.shape_dict:\n shape = graph.shape_dict[var]\n if res.shape != shape:\n print(\"Warning: Shapes do not match: \", var, res.shape, shape)\n if res.shape is not None:\n print(\"Replacing shape {} with {}\".format(shape, res.shape))\n graph.shape_dict[var] = res.shape\n elif res.shape is not None:\n graph.shape_dict[var] = res.shape\n\n print(\"Inference graph:\")\n for node in graph.nodes:\n inputs = node.inputs\n input_shapes = (str(graph.shape_dict[i]) for i in node.inputs if i in graph.shape_dict)\n outputs = node.outputs\n output_shapes = (str(graph.shape_dict[o]) for o in node.outputs if o in graph.shape_dict)\n print(\"{:<24} {:<20} {:<30} {:<30} {:<20} {:<30}\".format(node.name,\n node.op_type,\n \",\".join(inputs),\n \",\".join(input_shapes),\n \",\".join(outputs),\n \",\".join(output_shapes)))\n\n memory_manager = MemoryManager()\n\n self._generate_weights_file(graph)\n\n self.dummy_input = generate_dummy_main(graph)\n\n self.reference_input = generate_reference_main(graph)\n\n self._generate_network_initialization(graph, memory_manager)\n\n self._generate_network_cleanup(graph, memory_manager)\n\n implementations = self._select_implementations(graph, memory_manager)\n schedule = self._get_schedule(graph, implementations)\n # self._print_live_ranges(schedule)\n\n input_names = [\"input_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.inputs]\n output_names = [\"output_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.outputs]\n\n \"\"\"Currently we only allow single input (no batch processing) to the CNN, but this may be multi-channel input\"\"\"\n inputs = graph.inputs\n if len(inputs) > 1:\n print(\"ERROR: Multiple inputs not supported!\")\n exit(1)\n else:\n input_shape = graph.shape_dict[inputs[0].name]\n print(\"Input shape: {}\".format(input_shape))\n\n if len(input_shape) == 4:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 3:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not 
supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 2:\n print(\"Input is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n outputs = graph.outputs\n if len(outputs) > 1:\n print(\"ERROR: Multiple outputs not supported\")\n exit(1)\n else:\n output_shape = graph.shape_dict[outputs[0].name]\n print(\"Output shape: {}\".format(output_shape))\n\n if len(output_shape) == 2:\n print(\"Output is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n output_defs = [\"pico_cnn::naive::Tensor *\" + n for n in output_names]\n elif len(output_shape) == 3:\n print(\"ERROR: Unknown output shape of network: {}\".format(output_shape))\n exit(1)\n elif len(output_shape) == 4:\n print(\"ERROR: Multi-dimensional output is currently not supported.\")\n exit(1)\n\n network_def = \"void Network::run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n network_def_header = \"void run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n\n layer_declaration_code = \"\"\n layer_allocation_code = \"\"\n layer_execution_code = \"\"\n layer_deletion_code = \"\"\n\n \"\"\"Iterate over all tasks in the schedule, put some debug info in the code and the pico-cnn implementation.\"\"\"\n for task in schedule:\n num, node, impl = task\n layer_allocation_code += \" //Layer \" + str(num) + \" \" + node.name + \" \" + node.op_type + \"\\n\"\n layer_allocation_code += \" //Attributes\\n\"\n for key, val in node.attrs.items():\n layer_allocation_code += \" // \" + str(key) + \": \" + str(val) + \"\\n\"\n layer_allocation_code += \" //Parameters\\n\"\n layer_allocation_code += \" //Inputs: \" + \",\".join(node.inputs) + \"\\n\"\n layer_allocation_code += \" //Outputs: \" + \",\".join(node.outputs) + \"\\n\"\n layer_allocation_code += \" //Shape:\\n\"\n for i in node.inputs:\n layer_allocation_code += \" // {}: {}\\n\".format(i, graph.get_shape(i))\n for o in node.outputs:\n layer_allocation_code += \" // {}: {}\\n\".format(o, graph.get_shape(o))\n\n if impl:\n layer_declaration_code += impl.generate_declaration()\n layer_declaration_code += \"\\n\"\n\n layer_allocation_code += impl.generate_allocation()\n layer_allocation_code += \"\\n\"\n\n layer_execution_code += impl.generate_execution()\n layer_execution_code += \"\\n\"\n\n layer_deletion_code += impl.generate_deletion()\n layer_deletion_code += \"\\n\"\n\n else:\n print(\"ERROR: Unsupported layer: {}! 
Aborting code generation.\".format(node.op_type))\n return 1\n\n self.constructor_code += layer_allocation_code + \"\\n\"\n self.destructor_code += layer_deletion_code + \"\\n\"\n\n # # TODO: What does this loop do?\n # for id, buffer in memory_manager.buffers.items():\n # if graph.is_tensor(id):\n # continue\n # if graph.is_input(id):\n # continue\n # if graph.is_output(id):\n # continue\n\n network_code: Text = \"#include \\\"network.h\\\"\\n\\n\"\n network_code += \"Network::Network() {\\n\\n\"\n network_code += self.constructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += \"Network::~Network() {\\n\"\n network_code += self.destructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += network_def+\"{\\n\"\n network_code += layer_execution_code\n\n network_code += \"}\\n\\n\"\n\n network_header = \"#ifndef NETWORK_H\\n\"\n network_header += \"#define NETWORK_H\\n\\n\"\n network_header += \"#include \\\"pico-cnn/pico-cnn.h\\\"\\n\\n\"\n network_header += \"class Network {\\n\"\n network_header += \"public:\\n\"\n network_header += \"Network();\\n\"\n network_header += \"~Network();\\n\"\n network_header += network_def_header + \"; \\n\\n\"\n network_header += self.buffer_declaration + \"\\n\"\n network_header += layer_declaration_code\n network_header += \"};\\n\"\n network_header += \"#endif //NETWORK_H\\n\"\n\n self.network_code = network_code\n self.network_header = network_header\n\n \"\"\"\n Create Makefile containing a target for the generated dummy input and a network specific one.\n The code for the network specific input has to be written manually.\n \"\"\"\n # TODO: Does this need to be more sophisticated?\n self.makefile = \"CC = g++\\n\"\n self.makefile += \"CFLAGS = -std=c++11 -Wall -O2 -march=native -DINFO\\n\"\n self.makefile += \"LDFLAGS = -L../../../pico-cnn\\n\"\n self.makefile += \"LD_LIBS = -lpico-cnn -lm\\n\\n\"\n self.makefile += \"# list of all generated .cpp files.\\n\"\n self.makefile += \"NETWORK_LIST = network.cpp\"\n self.makefile += \"\\n\\ndummy_input: dummy_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) dummy_input.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) $(LDFLAGS) $(LD_LIBS) -o dummy_input\"\n self.makefile += \"\\n\\nreference_input: reference_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) reference_input.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o reference_input\"\n self.makefile += \"\\n\\n{}: {}.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\".format(self.model_name, self.model_name)\n self.makefile += \"$(CC) {}.cpp $(NETWORK_LIST) -I../../.. 
$(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o {}\".format(self.model_name, self.model_name)\n self.makefile += \"\\n\\nall: dummy_input reference_input {}\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: clean\\n\"\n self.makefile += \"clean:\\n\\trm -rf {} dummy_input reference_input\\n\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: libpico-cnn.a\\n\"\n self.makefile += \"libpico-cnn.a:\\n\\t$(MAKE) -C ../../../pico-cnn\"\n\n self.save(\"./generated_code/{}\".format(self.model_name))", "def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)", "def __call__(self, inputs, **kwargs):\n # Actually call the layer (optionally building it).\n output = super(Layer, self).__call__(inputs, **kwargs)\n if context.in_eager_mode():\n return output\n\n # Un-built subclassed network: build it\n if isinstance(self, Network) and not self.inputs:\n self._set_inputs(inputs, training=kwargs.get('training'))\n\n # Update learning phase info.\n output_tensors = to_list(output)\n uses_lp = any(\n [getattr(x, '_uses_learning_phase', False) for x in to_list(inputs)])\n uses_lp = getattr(self, 'uses_learning_phase', False) or uses_lp\n for i in range(len(output_tensors)):\n output_tensors[i]._uses_learning_phase = getattr(\n output_tensors[i], '_uses_learning_phase', False) or uses_lp\n\n # Optionally load weight values that were specified at layer instantiation.\n if hasattr(self, '_initial_weights') and self._initial_weights is not None:\n self.set_weights(self._initial_weights)\n del self._initial_weights\n return output", "def export_layers(self, dest, show):\n doc = copy.deepcopy(self.document)\n for layer in doc.xpath('//svg:g[@inkscape:groupmode=\"layer\"]', namespaces=inkex.NSS):\n layer.attrib['style'] = 'display:none'\n id = layer.attrib[\"id\"]\n if id in show:\n layer.attrib['style'] = 'display:inline'\n\n doc.write(dest)", "def common_layers(self):\n return [\n self.attention_layer, self.attention_output_dense,\n self.attention_dropout, self.attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_dropout,\n self.output_layer_norm\n ]", "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)", "def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n CustomTransformerBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n float_type=self.float_type,\n name=(\"layer_%d\" % i)))\n super(CustomTransformer, self).build(unused_input_shapes)", "def create_feature_layers(self):\n feature_columns = [tf.feature_column.numeric_column(name,\n normalizer_fn=lambda x: (x - self.train_features[\n name].mean()) /\n self.train_features[name].std())\n for name in self.feature_names]\n\n self.feature_layers = layers.DenseFeatures(feature_columns)\n return 'feature layers had been created'", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this 
layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf" ]
[ "0.72325057", "0.6624318", "0.62856334", "0.62295145", "0.6214809", "0.6171816", "0.60817003", "0.5990596", "0.59697384", "0.5955498", "0.5902771", "0.58940166", "0.5883603", "0.5861259", "0.5861006", "0.5847972", "0.5845203", "0.5828398", "0.5788822", "0.5782679", "0.5767099", "0.57425326", "0.57222074", "0.5712829", "0.5712829", "0.56764674", "0.56520236", "0.56172395", "0.5594013", "0.55928683", "0.5580619", "0.5579592", "0.55595297", "0.5550327", "0.55495214", "0.5527533", "0.5517842", "0.55077225", "0.5502721", "0.55026466", "0.549963", "0.5492102", "0.5485171", "0.54708755", "0.54690754", "0.5463765", "0.5462446", "0.5460833", "0.5456318", "0.5452711", "0.5444138", "0.5443301", "0.5439382", "0.54338735", "0.5421428", "0.5419587", "0.5408111", "0.54061496", "0.5398926", "0.5398483", "0.53930515", "0.53733885", "0.53709096", "0.53703576", "0.5361642", "0.5359667", "0.53589463", "0.5354624", "0.53545886", "0.535422", "0.53375673", "0.5334994", "0.53269535", "0.53221035", "0.5317047", "0.53027296", "0.52973855", "0.52901715", "0.5275039", "0.5274979", "0.52737033", "0.5266431", "0.5261835", "0.52605796", "0.52600664", "0.52496713", "0.52330184", "0.5231018", "0.52298784", "0.5227895", "0.522487", "0.52173674", "0.52170604", "0.5214551", "0.52106416", "0.5199109", "0.5191151", "0.518876", "0.5188223", "0.5186258", "0.5180307" ]
0.0
-1
Initializes the tensorflow graph for the ResNet50v2 model.
def __init__(self, x, num_classes=15, is_training=False):

    super(resnet_v2_152, self).__init__()

    self.x = x
    self.num_classes = num_classes

    # populating the tensorflow graph
    with slim.arg_scope(arg_scopes_map['resnet_v2_152']()):
        net, end_points = networks_map['resnet_v2_152'](
            x, num_classes=num_classes,
            is_training=is_training, reuse=None)

    self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_152/logits')
    self.variables_to_restore = slim.get_variables_to_restore(exclude=[])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def build_graph(self):\n assert self.n_features is not None, 'Number of features is unknown. It can be set explicitly by .core.set_num_features'\n self.graph = tf.Graph()\n self.graph.seed = self.seed\n with self.graph.as_default():\n with tf.name_scope('learnable_params') as scope:\n self.init_learnable_params()\n with tf.name_scope('input_block') as scope:\n self.init_placeholders()\n with tf.name_scope(\"cosine_similarity\"):\n self.init_similarity_computation()\n with tf.name_scope('main_block') as scope:\n self.init_main_block()\n with tf.name_scope('optimization_criterion') as scope:\n self.init_regularization()\n self.init_loss()\n self.init_target()\n self.trainer = self.optimizer.minimize(self.target)\n self.init_all_vars = tf.global_variables_initializer()\n self.summary_op = tf.summary.merge_all()\n self.saver = tf.train.Saver()", "def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()", "def initialize(self):\n logging.info(\"Loading model.\")\n\n self._bleurt_graph = tf.Graph()\n with self._bleurt_graph.as_default():\n\n imported = tf.saved_model.load(self.checkpoint)\n bleurt_model_ops = imported.signatures[\"serving_default\"]\n self._bleurt_ops = bleurt_model_ops(\n input_ids=tf.compat.v1.placeholder(tf.int64, name=\"input_ids\"),\n input_mask=tf.compat.v1.placeholder(tf.int64, name=\"input_mask\"),\n segment_ids=tf.compat.v1.placeholder(tf.int64, name=\"segment_ids\"))\n\n init_op = tf.group(tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.tables_initializer())\n\n self.session = tf.compat.v1.Session(graph=self._bleurt_graph)\n self.session.run(init_op)\n\n logging.info(\"Done.\")", "def create_graph(self):\n self.graph = tf.Graph()\n model_type = self.options['model_type']\n optimiser_selected = self.options['optimizer']\n\n with self.graph.as_default():\n self.tf_dataset = tf.placeholder(tf.float32,\n shape=(None, self.options['num_steps'], self.input_dimensions))\n self.tf_labels = tf.placeholder(tf.float32, shape=(None, self.input_dimensions))\n self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')\n\n # Forward pass\n if model_type == 'rnn':\n self.predict = self.rnn_model(self.tf_dataset)\n elif model_type == 'lstm':\n self.predict = self.lstm_model(self.tf_dataset)\n else:\n raise NotImplementedError(\"Unimplemented RNN model keyword\")\n\n self.loss = tf.reduce_mean(tf.square(self.predict - self.tf_labels))\n\n if self.options['regularisation_coeff'] > 0.:\n # Add in L2 penalty for regularisation if required\n penalty = self.options['regularisation_coeff'] * sum(tf.nn.l2_loss(var)\n for var in tf.trainable_variables())\n self.loss += penalty\n\n if self.options['use_customised_optimizer'] is False:\n if optimiser_selected == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif optimiser_selected == 'grad':\n self.optimizer = 
tf.train.GradientDescentOptimizer(self.learning_rate)\n elif optimiser_selected == 'ada':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif optimiser_selected == 'rms':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n else:\n raise NotImplementedError(\"Unimplemented built-in optimiser keyword.\")\n else:\n self.optimizer = self.options['customized_optimizer']\n self.minimise = self.optimizer.minimize(self.loss)", "def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def _initialize_session(self):\n config = tf.ConfigProto()\n # restrict model GPU memory utilization to min required\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n tf_ver = int(tf.__version__.split('.')[1])\n if TF_VERSION <= 0.10:\n self.sess.run(tf.initialize_all_variables())\n logswriter = tf.train.SummaryWriter\n else:\n self.sess.run(tf.global_variables_initializer())\n logswriter = tf.summary.FileWriter\n self.saver = tf.train.Saver()\n self.summary_writer = logswriter(self.logs_path, graph=self.sess.graph) # change by ccx, add the graph_def", "def _build_graph(self):\n\n self.graph = tf.Graph()\n\n # set self.graph as default graph\n with 
self.graph.as_default():\n # # clear old variables\n # tf.reset_default_graph()\n\n # set random seed\n if self.random_seed is not None:\n tf.set_random_seed(self.random_seed)\n\n self._create_placeholders()\n self._create_variables()\n\n self._create_prediction()\n\n self._create_loss()\n self._create_optimizer()\n\n self._init = tf.global_variables_initializer()\n\n self.saver = tf.train.Saver()\n\n # create session\n self.sess = tf.Session(graph=self.graph)", "def init_target_net(self, sess):\n sess.run(self.init_target_net_op)", "def setup(self, context: ExecutionContext) -> BaseStep:\n if self.is_initialized:\n return self\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)\n\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')\n\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True", "def _init_session(self):\n self.sess = tf.Session(graph=self.g)\n self.sess.run(self.init)", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W1 = tf.get_variable(\"W1\", shape=[self.h, self.N], initializer=tf.truncated_normal_initializer)\n self.b1 = tf.get_variable(\"b1\", shape=[self.h, 1], initializer=tf.zeros_initializer)\n\n self.W2 = tf.get_variable(\"W2\", shape=[self.C, self.h], initializer=tf.truncated_normal_initializer)\n self.b2 = tf.get_variable(\"b2\", shape=[self.C, 1], initializer=tf.truncated_normal_initializer)\n\n self.z1 = tf.matmul(self.W1, self.X) + self.b1\n self.a1 = self.activation(self.z1)\n\n self.z2 = tf.matmul(self.W2, self.a1) + self.b2\n self.y_hat = tf.nn.softmax(self.z2, dim=0)\n\n self.l2_reg = tf.nn.l2_loss(self.W1) + tf.nn.l2_loss(self.W2)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z2, dim=0)) \\\n + self.beta * self.l2_reg\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def _initialize_eval_graph(self):\n self.X_test_tf = tf.placeholder(tf.int64, shape=[1, 3])\n\n self.table_entity_lookup_left = None\n self.table_entity_lookup_right = None\n self.table_reln_lookup = None\n\n all_entities_np = np.int64(np.arange(len(self.ent_to_idx)))\n\n if self.is_filtered:\n all_reln_np = np.int64(np.arange(len(self.rel_to_idx)))\n self.table_entity_lookup_left = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(all_entities_np,\n np.array(self.entity_primes_left, dtype=np.int64))\n , 0)\n self.table_entity_lookup_right = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(all_entities_np,\n np.array(self.entity_primes_right, dtype=np.int64))\n , 0)\n self.table_reln_lookup = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(all_reln_np,\n np.array(self.relation_primes, dtype=np.int64))\n , 0)\n\n # Create table to store train+test+valid triplet prime 
values(product)\n self.table_filter_lookup = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(np.array(self.filter_keys, dtype=np.int64),\n np.zeros(len(self.filter_keys), dtype=np.int64))\n , 1)\n\n corruption_entities = self.eval_config.get('corruption_entities', DEFAULT_CORRUPTION_ENTITIES)\n\n if corruption_entities == 'all':\n corruption_entities = all_entities_np\n elif isinstance(corruption_entities, list):\n corruption_entities = corruption_entities\n else:\n msg = 'Invalid type for corruption entities!!!'\n logger.error(msg)\n raise ValueError(msg)\n\n self.corruption_entities_tf = tf.constant(corruption_entities, dtype=tf.int64)\n\n self.out_corr, self.out_corr_prime = generate_corruptions_for_eval(self.X_test_tf,\n self.corruption_entities_tf,\n self.eval_config.get('corrupt_side',\n DEFAULT_CORRUPT_SIDE),\n self.table_entity_lookup_left,\n self.table_entity_lookup_right,\n self.table_reln_lookup)\n\n if self.is_filtered:\n # check if corruption prime product is present in dataset prime product\n self.presense_mask = self.table_filter_lookup.lookup(self.out_corr_prime)\n self.filtered_corruptions = tf.boolean_mask(self.out_corr, self.presense_mask)\n else:\n self.filtered_corruptions = self.out_corr\n\n self.concatinated_set = tf.concat([self.X_test_tf, self.filtered_corruptions], 0)\n\n e_s, e_p, e_o = self._lookup_embeddings(self.concatinated_set)\n self.scores_predict = self._fn(e_s, e_p, e_o)\n self.score_positive = tf.gather(self.scores_predict, 0)\n self.rank = tf.reduce_sum(tf.cast(self.scores_predict >= self.score_positive, tf.int32))", "def _setup_graph(self):\n sess = tf.Session()\n\n ### PROBLEM 1\n ### YOUR CODE HERE\n # raise NotImplementedError\n state_ph, action_ph, next_state_ph, reward_ph = self._setup_placeholders()\n next_state_pred = self._dynamics_func(state_ph, action_ph)\n loss, optimizer = self._setup_training(state_ph, next_state_ph, next_state_pred)\n\n # fit cost function\n reward_pred = self._reward_func(state_ph, action_ph, next_state_pred)\n reawrd_loss, reward_optimizer = self._reward_training(reward_ph, reward_pred)\n\n ### PROBLEM 2\n ### YOUR CODE HERE\n # self._rollout_state_ph = tf.placeholder(tf.float32, (1, self._state_dim), name='rollout_state_ph')\n best_action = self._setup_action_selection(state_ph)\n\n # BONUS\n self._best_action_cross_entropy = self._cross_entropy_action_selection(state_ph)\n\n sess.run(tf.global_variables_initializer())\n\n return sess, state_ph, action_ph, next_state_ph, reward_ph, \\\n next_state_pred, loss, optimizer, best_action, reward_pred, reawrd_loss, reward_optimizer", "def add_initializer_to_graph(self):\n with tf.device(self.params.device):\n with self.graph.as_default():\n with tf.compat.v1.variable_scope(\"initialization\") as scope:\n self.init_op = tf.group(tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.local_variables_initializer())", "def _init_session(self):\n self.sess = tf.Session(config=self.config, graph=self.g)\n self.sess.run(self.init)", "def setup(self):\n if not hasattr(logger, 'LOG_DIR'):\n raise RuntimeError(\"logger directory wasn't set!\")\n\n self._setup() # subclass will setup the graph\n\n describe_model()\n # some final operations that might modify the graph\n logger.info(\"Setup summaries ...\")\n self.summary_writer = tf.summary.FileWriter(logger.LOG_DIR, graph=tf.get_default_graph())\n # create an empty StatHolder\n self.stat_holder = StatHolder(logger.LOG_DIR)\n\n logger.info(\"Setup callbacks graph ...\")\n 
self.config.callbacks.setup_graph(weakref.proxy(self))\n self.config.session_init._setup_graph()\n\n def after_init(scaffold, sess):\n logger.info(\"Graph variables initialized.\")\n self.config.session_init._run_init(sess)\n\n scaffold = tf.train.Scaffold(\n init_op=tf.global_variables_initializer(),\n init_fn=after_init)\n logger.info(\"Finalize the graph, create the session ...\")\n self.monitored_sess = tf.train.MonitoredSession(\n session_creator=tf.train.ChiefSessionCreator(\n scaffold=scaffold, config=self.config.session_config),\n hooks=self.config.callbacks.get_hooks())\n self.hooked_sess = self.monitored_sess # just create an alias\n self.sess = self.monitored_sess._tf_sess() # expose the underlying session also", "def build(self):\n self.global_step = tf.train.get_or_create_global_step()\n self.build_network()\n if self.mode != tf.estimator.ModeKeys.PREDICT:\n self.build_losses()", "def build(self):\n self.global_step = tf.train.get_or_create_global_step()\n self.build_network()\n if self.mode != tf.estimator.ModeKeys.PREDICT:\n self.build_losses()", "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! 
Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)", "def initialize_session(self):\r\n self.logger.info(\"Initializing tf session\")\r\n self.sess = tf.Session()\r\n self.sess.run(tf.global_variables_initializer())\r\n self.saver = tf.train.Saver()", "def __init__(self, x, num_classes=15, is_training=False):\n\n super(resnet_v2_50, self).__init__()\n\n self.x = x\n self.num_classes = num_classes\n\n # populating the tensorflow graph\n with slim.arg_scope(arg_scopes_map['resnet_v2_50']()):\n net, end_points = networks_map['resnet_v2_50'](\n x, num_classes=num_classes,\n is_training=is_training, reuse=None)\n\n self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_50/logits')\n self.variables_to_restore = slim.get_variables_to_restore(exclude=[])", "def __init__(self, resnet_size, bottleneck, 
num_classes, \n num_filters, kernel_size, conv_stride, time_kernel_size,\n first_pool_size, first_pool_stride,\n block_sizes, block_strides,\n final_size, data_format=None,\n model_name_scope='resnet_model'):\n self.resnet_size = resnet_size\n\n if not data_format:\n data_format = (\n 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')\n\n self.resnet_version = 2\n\n self.bottleneck = bottleneck\n if bottleneck:\n self.block_fn = _bottleneck_block_v2\n else:\n self.block_fn = _building_block_v2\n\n self.data_format = data_format\n self.num_classes = num_classes\n self.num_filters = num_filters\n self.kernel_size = kernel_size\n self.conv_stride = conv_stride\n self.time_kernel_size = time_kernel_size\n self.first_pool_size = first_pool_size\n self.first_pool_stride = first_pool_stride\n self.block_sizes = block_sizes\n self.block_strides = block_strides\n self.final_size = final_size\n self.dtype = tf.float32\n self.pre_activation = True\n self.model_name_scope = model_name_scope", "def __init__(self, sess, network, learning_rate=0.1, discount_factor=0.99):\n self.sess = sess\n self.learning_rate = learning_rate\n self.discount_factor = discount_factor\n self.network = network\n self.defineUpdateOperations()\n self.init = tf.global_variables_initializer()\n self.initialize_variables()", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n self._add_placeholders()\n with tf.device(\"/gpu:0\"):\n self._add_seq2seq()\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n if self._hps.mode == 'train':\n self._add_train_op()\n self._summaries = tf.summary.merge_all()\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)\n \n print('#'*78,'\\nprinting model variables:')\n total_parameters = 0\n for variable in tf.trainable_variables():\n shape = variable.get_shape().as_list()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim\n print('{:}: shape={:}, variable_parameters={:}'.format(\n variable.name, shape, variable_parameters))\n total_parameters += variable_parameters\n print('total model parameters: {:}'.format(total_parameters))\n print('#'*78)", "def _init_vars(self):\n print \"Initializing session\"\n self.x = tf.placeholder(tf.float32, shape=[None, 784])\n self.y = tf.placeholder(tf.float32, shape=[None, 10])", "def _build_graph(self, seed):\n self.g = tf.Graph()\n with self.g.as_default():\n tf.set_random_seed(seed)\n self._placeholders()\n self._policy_nn()\n self._loss_train_op()\n self.init = tf.global_variables_initializer()", "def initialize_session(self):\n self.logger.info(\"Initializing tf session\")\n session_conf = tf.ConfigProto(\n allow_soft_placement=self.FLAGS.allow_soft_placement,\n log_device_placement=self.FLAGS.log_device_placement)\n self.session = tf.Session(config=session_conf)\n self.session.run(tf.global_variables_initializer())\n try: \n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.FLAGS.num_checkpoints)\n except:\n pass", "def initialize_session(self):\n self.logger.info(\"Initializing tf session\")\n self.sess = tf.compat.v1.Session()\n self.sess.run(tf.compat.v1.global_variables_initializer())\n self.saver = tf.compat.v1.train.Saver()", "def initialize_and_train(self):\n self.probabilities = tf.nn.softmax(self.hidden_layer3,name = 'test_probabilities')\n \n \"\"\"Calulates 10 probabilities based off of our input nodes, than calculates the error using\n cross entropy function, which turns those ten probabilities into an 
integer value. we than take \n the mean of the cross entropy errors. Logits are the values to be used as input to softmax\"\"\"\n self.error = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits = self.hidden_layer3, labels = self.outputs, name = 'error'))\n \"\"\"initialize all of our variables with acutal numbers\"\"\"\n with tf.Session() as session:\n session.run(self.filters.initializer)\n session.run(self.filters2.initializer)\n session.run(self.weights.initializer)\n session.run(self.weights2.initializer)\n session.run(self.bias.initializer)\n session.run(self.bias2.initializer)\n session.run(self.weights3.initializer)\n session.run(self.bias3.initializer)\n \"\"\"create gradient descent function\"\"\"\n self.train = tf.train.GradientDescentOptimizer(0.1).minimize(self.error)\n \n \"\"\"these are our two index's that give us our batch size for gradient descent below\"\"\"\n index1 = 0\n index2 = 500\n \"\"\"this for loop runs mini-batch gradient descent and prints error every ith iteration\"\"\"\n for i in range(4500): \n \"\"\"if our second index is less than the # of training sets, input propper index in feed_dict and run\"\"\"\n if index2 < int(self.images.shape[0]): \n feed_dict = {self.inputs : self.images[index1:index2], self.outputs : self.labels[index1:index2]} \n session.run(self.train, feed_dict)\n iteration = i+1\n \"\"\"add 500 to each index and continue iterations\"\"\"\n index1 += 500\n index2 += 500\n \n elif index2 >= int(self.images.shape[0]):\n \"\"\"if our second index is greater than or equal to # of training sets, \n input propper index in feed_dict and run\"\"\"\n index2 == int(self.images.shape[0])\n feed_dict = {self.inputs : self.images[index1:index2], self.outputs : self.labels[index1:index2]}\n session.run(self.train, feed_dict)\n iteration = i+1\n \"\"\"reset the index back to its orginal value and continue iterations\"\"\"\n index1 = 0\n index2 = 500 \n\n if iteration % 100 == 0: \n print(index1,index2)\n print('#', iteration, 'error is:', session.run(self.error, feed_dict))\n \"\"\"save the final results of our weights/filter variables as outputfile\"\"\"\n self.saver = tf.train.Saver() \n self.saver.save(session, \"/Users/bennicholl/Desktop/outputfile\")\n \n \"\"\"this below code is for tensorboard, a data visualization tool\"\"\"\n \"\"\"open local host:6006 on chrome, than type in hashtagged code block below in a terminal\"\"\"\n #python -m tensorboard.main --logdir=\"/Users/bennicholl/Desktop/output3\"\n with tf.Session() as session:\n writer = tf.summary.FileWriter(\"/Users/bennicholl/Desktop/output3\", session.graph)\n writer.close()", "def define_graph(self):\n with tf.name_scope('discriminator'):\n ##\n # Setup scale networks. Each will make the predictions for images at a given scale.\n ##\n\n self.scale_nets = []\n for scale_num in xrange(self.num_scale_nets):\n with tf.name_scope('scale_net_' + str(scale_num)):\n scale_factor = 1. 
/ 2 ** ((self.num_scale_nets - 1) - scale_num)\n self.scale_nets.append(DScaleModel(scale_num,\n int(self.height * scale_factor),\n int(self.width * scale_factor),\n self.scale_conv_layer_fms[scale_num],\n self.scale_kernel_sizes[scale_num],\n self.scale_fc_layer_sizes[scale_num]))\n\n # A list of the prediction tensors for each scale network\n self.scale_preds = []\n for scale_num in xrange(self.num_scale_nets):\n self.scale_preds.append(self.scale_nets[scale_num].preds)\n\n ##\n # Data\n ##\n\n self.labels = tf.placeholder(tf.float32, shape=[None, 1], name='labels')\n\n ##\n # Training\n ##\n\n with tf.name_scope('training'):\n # global loss is the combined loss from every scale network\n self.global_loss = adv_loss(self.scale_preds, self.labels)\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n self.optimizer = tf.train.GradientDescentOptimizer(c.LRATE_D, name='optimizer')\n self.train_op = self.optimizer.minimize(self.global_loss,\n global_step=self.global_step,\n name='train_op')\n\n # add summaries to visualize in TensorBoard\n loss_summary = tf.summary.scalar('loss_D', self.global_loss)\n self.summaries = tf.summary.merge([loss_summary])", "def __init__(self, graph, weights,\n input_tensor_name=None,\n output_tensor_name=None):\n\n self.sess = tf.Session()\n new_saver = tf.train.import_meta_graph(graph)\n new_saver.restore(self.sess, weights)\n\n get_tensor = tf.get_default_graph().get_tensor_by_name\n # Get the initial place holder, else default\n if input_tensor_name:\n self.placeholder = get_tensor(input_tensor_name)\n else:\n self.placeholder = get_tensor('Placeholder:0')\n\n if output_tensor_name:\n self.softmax = get_tensor(output_tensor_name)\n else:\n self.softmax = get_tensor('Softmax:0')\n\n # Save trainables into params\n trainable_params = tf.trainable_variables()\n layers = {}\n params = {}\n\n def add_to_layer(name):\n try:\n layers[name] = get_tensor(\"{}:0\".format(name))\n except KeyError:\n try:\n layers[name] = get_tensor(\"{}/Relu:0\".format(name))\n except KeyError:\n print(\"Activation Not Found.\")\n pass\n\n for v in trainable_params:\n if 'weight' in v.name:\n name = v.name.split('/')[0]\n params[name] = v\n add_to_layer(name)\n\n # Pooling layers usually don't have a nice way of gathering.\n for n in tf.get_default_graph().as_graph_def().node:\n if 'pool' in n.name:\n v = get_tensor(\"{}:0\".format(n.name))\n name = n.name.split('/')[0]\n params[name] = v\n add_to_layer(name)\n\n # Get trainable params - 1 holds locations the other is a dummy script\n self.params = {}\n self._params = params\n self.layers = layers\n # Save empty dict into blobs\n self.blobs = {}", "def build_graph(self):\n # Print\n if self.verbose:\n print('Building Yolo Graph....')\n # Reset default graph\n tf.reset_default_graph()\n # Input placeholder\n self.x = tf.placeholder('float32', [None, 448, 448, 3])\n # conv1, pool1\n self.conv1 = self.conv_layer(1, self.x, 64, 7, 2)\n self.pool1 = self.maxpool_layer(2, self.conv1, 2, 2)\n # size reduced to 64x112x112\n # conv2, pool2\n self.conv2 = self.conv_layer(3, self.pool1, 192, 3, 1)\n self.pool2 = self.maxpool_layer(4, self.conv2, 2, 2)\n # size reduced to 192x56x56\n # conv3, conv4, conv5, conv6, pool3\n self.conv3 = self.conv_layer(5, self.pool2, 128, 1, 1)\n self.conv4 = self.conv_layer(6, self.conv3, 256, 3, 1)\n self.conv5 = self.conv_layer(7, self.conv4, 256, 1, 1)\n self.conv6 = self.conv_layer(8, self.conv5, 512, 3, 1)\n self.pool3 = self.maxpool_layer(9, self.conv6, 2, 2)\n # size reduced to 512x28x28\n 
# conv7 - conv16, pool4\n self.conv7 = self.conv_layer(10, self.pool3, 256, 1, 1)\n self.conv8 = self.conv_layer(11, self.conv7, 512, 3, 1)\n self.conv9 = self.conv_layer(12, self.conv8, 256, 1, 1)\n self.conv10 = self.conv_layer(13, self.conv9, 512, 3, 1)\n self.conv11 = self.conv_layer(14, self.conv10, 256, 1, 1)\n self.conv12 = self.conv_layer(15, self.conv11, 512, 3, 1)\n self.conv13 = self.conv_layer(16, self.conv12, 256, 1, 1)\n self.conv14 = self.conv_layer(17, self.conv13, 512, 3, 1)\n self.conv15 = self.conv_layer(18, self.conv14, 512, 1, 1)\n self.conv16 = self.conv_layer(19, self.conv15, 1024, 3, 1)\n self.pool4 = self.maxpool_layer(20, self.conv16, 2, 2)\n # size reduced to 1024x14x14\n # conv17 - conv24\n self.conv17 = self.conv_layer(21, self.pool4, 512, 1, 1)\n self.conv18 = self.conv_layer(22, self.conv17, 1024, 3, 1)\n self.conv19 = self.conv_layer(23, self.conv18, 512, 1, 1)\n self.conv20 = self.conv_layer(24, self.conv19, 1024, 3, 1)\n self.conv21 = self.conv_layer(25, self.conv20, 1024, 3, 1)\n self.conv22 = self.conv_layer(26, self.conv21, 1024, 3, 2)\n self.conv23 = self.conv_layer(27, self.conv22, 1024, 3, 1)\n self.conv24 = self.conv_layer(28, self.conv23, 1024, 3, 1)\n # size reduced to 1024x7x7\n # fc1, fc2, fc3\n self.fc1 = self.fc_layer(29, self.conv24, 512,\n flatten=True, linear=False)\n self.fc2 = self.fc_layer(\n 30, self.fc1, 4096, flatten=False, linear=False)\n self.fc3 = self.fc_layer(\n 31, self.fc2, 1470, flatten=False, linear=True)\n # Run session\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n self.saver.restore(self.sess, self.weightFile)\n # Print\n print('Graph built.')", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W = tf.get_variable(\"W\", shape=[self.C, self.N], initializer=tf.truncated_normal_initializer)\n self.b = tf.get_variable(\"b\", shape=[self.C, 1], initializer=tf.zeros_initializer)\n\n self.z = tf.matmul(self.W, self.X) + self.b\n self.y_hat = tf.nn.softmax(self.z, dim=0)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z, dim=0))\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def build_graph(self):\n\n ##### Build Graph #####\n baseModel.build_graph(self)\n\n ##### Create Optimization #####\n with tf.variable_scope(\"optimize\"):\n self.add_loss()\n self.add_accuracy()\n self.initialize_learning_rate()\n self.initialize_optimization()\n\n ##### History and Checkpoints #####\n self.hasTrained = False\n self._lastSaved = collections.defaultdict(None)\n self.history = collections.defaultdict(list)\n self.saver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestLossSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestAccSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n\n logging.basicConfig(level=logging.INFO)\n log_handler = logging.FileHandler(\"log.txt\")\n logging.getLogger().addHandler(log_handler)\n\n self.summaries = tf.summary.merge_all()", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = 
Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(self):\n super(Model, self).__init__()\n\n self.batch_size = 200\n self.hidden_size = 264\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)\n\n self.dense_1 = tf.keras.layers.Dense(self.hidden_size, activation='relu')\n self.dense_2 = tf.keras.layers.Dense(self.hidden_size, activation='relu')", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k * 2],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k * 2],\n initializer=self.initializer)", "def __init__(self):\n self.classes_to_detect = ['person']\n # Load lebel_map\n self._load_label(PATH_TO_LABELS, NUM_CLASSES, use_disp_name=True)\n\n # Load Tensorflow model into memory\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(GRAPH_PATH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with self.detection_graph.as_default():\n self.sess = tf.Session(graph=self.detection_graph, config=tf_config)\n # Definite input and output Tensors for detection_graph\n self.image_tensor = self.detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n # Each box represents a part of the image where a particular\n # object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n # Each score represent how level of confidence for each of\n # the objects. 
Score is shown on the result image, together\n # with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name(\n 'num_detections:0')\n\n logger.info('Model graph loaded.')", "def model_setup(self):\n self.input_a = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_A\")\n self.input_b = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_B\")\n\n self.fake_pool_A = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_A\")\n self.fake_pool_B = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_B\")\n\n self.global_step = slim.get_or_create_global_step()\n\n self.num_fake_inputs = 0\n\n self.learning_rate = tf.placeholder(tf.float32, shape=[], name=\"lr\")\n\n inputs = {\n 'images_a': self.input_a,\n 'images_b': self.input_b,\n 'fake_pool_a': self.fake_pool_A,\n 'fake_pool_b': self.fake_pool_B,\n }\n\n outputs = model.get_outputs(\n inputs, network=self._network_version, skip=self._skip)\n\n self.prob_real_a_is_real = outputs['prob_real_a_is_real']\n self.prob_real_b_is_real = outputs['prob_real_b_is_real']\n self.fake_images_a = outputs['fake_images_a']\n self.fake_images_b = outputs['fake_images_b']\n self.prob_fake_a_is_real = outputs['prob_fake_a_is_real']\n self.prob_fake_b_is_real = outputs['prob_fake_b_is_real']\n\n self.cycle_images_a = outputs['cycle_images_a']\n self.cycle_images_b = outputs['cycle_images_b']\n\n self.prob_fake_pool_a_is_real = outputs['prob_fake_pool_a_is_real']\n self.prob_fake_pool_b_is_real = outputs['prob_fake_pool_b_is_real']", "def build(self, mode):\n assert mode in ['train', 'eval']\n self.mode = mode\n self._setup_misc(mode)\n self._setup_images_and_labels()\n self._build_graph(self.images, self.labels, mode)\n\n self.init = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k],\n initializer=self.initializer)", "def start(self):\n self.sess = tf.Session()\n tf.global_variables_initializer().run(session=self.sess)", "def _make_graph(self):\n # this resets the whole default graph for tensorflow\n tf.reset_default_graph()\n # inputs/outputs:\n # each input example will be two np.hstacked 3x3 matrices, flattened\n # (initial state s and final state s' after selecting action a)\n self.input = tf.placeholder(tf.float32, [None, 3 * 6])\n self.layers, self.weights, self.biases = \\\n make_fully_connected_network(\n input_layer=self.input,\n architecture=self.architecture,\n activation=self.activation\n )\n self.output = self.layers[-1]\n self.observed = tf.placeholder(tf.float32, shape=[None, 1])\n # MSE loss function\n self.loss = tf.reduce_sum(tf.square(self.output - self.observed))\n if self.penalty:\n penalty_tensor = tf.add_n([self.penalty_function(x) for x in self.weights])\n self.loss = self.loss + self.penalty * penalty_tensor\n self.optimizer = (self.optimizer_algo(learning_rate=self.learning_rate, **self.optimizer_params)\n 
.minimize(self.loss))", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def init_machine_learning(self):\n\t\ttry:\n\t\t\tprint(\"debut du chargement! \")\n\n\t\t\tself.x = tf.placeholder(tf.float32, [None, 784])\n\n\t\t\tself.session = tf.Session()\n\t\t\tnew_saver = tf.train.import_meta_graph(\"./modeles/avancer/model_avancer.meta\")\n\t\t\tnew_saver.restore(self.session, tf.train.latest_checkpoint('./'))\n\t\t\tall_vars = tf.get_collection('vars')\n\n\t\t\t#self.y_conv = all_vars[3]\n\t\t\t#self.keep_prob = all_vars[4]\n\n\t\t\t#for v in all_vars:\n\t\t\t#\tv_ = self.session.run(v)\n\t\t\t#\tprint(v_)\n\n\t\t\tprint(\"chargement terminer\")\n\t\t\t\n\t\texcept:\n\t\t\t\n\t\t\tprint(\"le chargement a echouer ! \\n creation d'un nouveau modele !\")\n\t\t\tself.mnist = input_data.read_data_sets(self.option[\"ch_mnist\"], one_hot=True)\n\n\t\t\tself.session = tf.InteractiveSession()\n\t\t\t#creation des variables\n\t\t\tW_conv1 = self.weight_variable([5, 5, 1, 32])\n\t\t\tb_conv1 = self.bias_variable([32])\n\n\t\t\t# Placeholder\n\t\t\tself.x = tf.placeholder(tf.float32, [None, 784])\n\t\t\ty_ = tf.placeholder(tf.float32, [None, 10])\n\n\t\t\t# Reshape\n\t\t\tx_image = tf.reshape(self.x , [-1,28,28,1])\n\n\t\t\th_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1)\n\t\t\th_pool1 = self.max_pool_2x2(h_conv1)\n\n\t\t\tW_conv2 = self.weight_variable([5, 5, 32, 64])\n\t\t\tb_conv2 = self.bias_variable([64])\n\n\t\t\th_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2) + b_conv2)\n\t\t\th_pool2 = self.max_pool_2x2(h_conv2)\n\n\t\t\tW_fc1 = self.weight_variable([7 * 7 * 64, 1024])\n\t\t\tb_fc1 = self.bias_variable([1024])\n\n\t\t\th_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n\t\t\th_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\t\t\t\n\t\t\tself.keep_prob = tf.placeholder(tf.float32)\n\t\t\th_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)\n\n\t\t\tW_fc2 = self.weight_variable([1024, 10])\n\t\t\tb_fc2 = self.bias_variable([10])\n\n\t\t\tself.y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n\n\t\t\t\t\t\n\t\t\tcross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.y_conv, y_))\n\t\t\ttrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\t\t\tcorrect_prediction = tf.equal(tf.argmax(self.y_conv,1), tf.argmax(y_ ,1))\n\t\t\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\t\t\tself.session.run(tf.global_variables_initializer())\n\n\t\t\tprint(\"sauvegarde variable\")\n\t\t\ttf.add_to_collection(\"vars\", h_fc1_drop)\n\t\t\ttf.add_to_collection(\"vars\", W_fc2)\n\t\t\ttf.add_to_collection(\"vars\", b_fc2)\n\t\t\ttf.add_to_collection(\"vars\", self.y_conv)\n\t\t\ttf.add_to_collection(\"vars\", self.keep_prob)\n\n\t\t\tprint(\"lancement antrainement modele\")\n\t\t\t\n\t\t\tfor i in range(1000):\n\t\t\t\tbatch = self.mnist.train.next_batch(50)\n\t\t\t\tif i%100 == 0:\n\t\t\t\t\ttrain_accuracy = accuracy.eval(feed_dict={self.x : batch[0], y_ : batch[1], self.keep_prob : 1.0})\n\t\t\t\t\tprint(\"step %d, training accuracy %g\"%(i, train_accuracy))\n\t\t\t\ttrain_step.run(feed_dict={self.x : batch[0], y_ : batch[1], self.keep_prob: 0.5})\n\n\t\t\tbatchSize = 5000\n\t\t\tfor i in range(len(self.mnist.train.labels) // batchSize):\n\t\t\t\tbat = self.mnist.test.next_batch(100)\n\t\t\t\tprint(\"test accuracy %g\" % accuracy.eval(feed_dict={self.x : bat[0], y_: bat[1], self.keep_prob: 1.0}))\n\t\t\t\n\t\t\t#sauvegarde des données\n\t\t\tsaver = tf.train.Saver()\n\t\t\tsave_path = 
saver.save(self.session, \"./modeles/avancer/model_avancer\")\n\t\t\tprint(\"Model saved in file: %s\" % save_path)", "def init(self,sess):\n if not os.path.isfile(\\\n \"./Models/\" + self.mod_name + \".ckpt.meta\"):\n sess.run(tf.global_variables_initializer())\n return 0\n else:\n if self.gen_only:\n sess.run(tf.global_variables_initializer())\n self.load(sess)\n return 1", "def __init__(self, config):\n self.config = config\n etat.UsesTFSession.__init__(self)\n\n # Get path to model\n self.config.download_model_if_necessary()\n model_path = self.config.model_path\n\n # Load model\n self._prefix = \"main\"\n self._graph = etat.load_graph(model_path, prefix=self._prefix)\n self._sess = None\n\n # Load class labels\n labels_map = etal.load_labels_map(self.config.labels_path)\n self._class_labels = etal.get_class_labels(labels_map)\n self._num_classes = len(self._class_labels)\n\n # Get network\n network_name = self.config.network_name\n network_fn = nf.get_network_fn(\n network_name, num_classes=self._num_classes, is_training=False\n )\n self.img_size = network_fn.default_image_size\n\n # Get input operation\n self._input_op = self._graph.get_operation_by_name(\n self._prefix + \"/\" + self.config.input_name\n )\n\n # Get feature operation, if necessary\n features_name = None\n if self.config.generate_features:\n if self.config.features_name:\n features_name = self.config.features_name\n elif network_name in _DEFAULT_FEATURES_NAMES:\n features_name = _DEFAULT_FEATURES_NAMES[network_name]\n if features_name is not None:\n self._features_op = self._graph.get_operation_by_name(\n self._prefix + \"/\" + features_name\n )\n else:\n self._features_op = None\n\n # Get output operation\n if self.config.output_name:\n output_name = self.config.output_name\n else:\n output_name = _DEFAULT_OUTPUT_NAMES.get(network_name, None)\n if output_name is None:\n raise ValueError(\n \"`output_name` was not provided and network `%s` was not \"\n \"found in default outputs map\" % network_name\n )\n self._output_op = self._graph.get_operation_by_name(\n self._prefix + \"/\" + output_name\n )\n\n # Setup preprocessing\n self._transforms = self._make_preprocessing_fcn(\n network_name, self.config.preprocessing_fcn\n )\n self._preprocess = True\n\n self._last_features = None\n self._last_probs = None", "def __init__(self, model_path, img_width, img_height, gpu_fraction=1.0):\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=['input_1:0', 'cumsum_values_1:0'])\n\n self.img_width = img_width\n self.img_height = img_height", "def build_graph(self):\n\t\tself.n_hidden = 100\n\t\tself.weights_hidden = tf.get_variable(\"weights_hidden\", [self.state_size, self.n_hidden], initializer = tf.random_normal_initializer())\n\t\tself.bias_hidden = tf.get_variable(\"bias_hidden\", [self.n_hidden], initializer = tf.constant_initializer(0.1))\n\n\t\tself.weights_out = tf.get_variable(\"weights_out\", [self.n_hidden, self.action_size], initializer = tf.random_normal_initializer())\n\t\tself.bias_out = tf.get_variable(\"bias_out\", 
[self.action_size], initializer = tf.constant_initializer(0.1))", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def initialize_variables(self):\n self.sess.run(self.init)", "def __init__(self, num_steps, model_load_path, num_test_rec):\n\n self.global_step = 0\n self.num_steps = num_steps\n self.num_test_rec = num_test_rec\n\n self.sess = tf.Session()\n self.summary_writer = tf.train.SummaryWriter(c.SUMMARY_SAVE_DIR, graph=self.sess.graph)\n\n if c.ADVERSARIAL:\n print 'Init discriminator...'\n self.d_model = DiscriminatorModel(self.sess,\n self.summary_writer,\n c.TRAIN_HEIGHT,\n c.TRAIN_WIDTH,\n c.SCALE_CONV_FMS_D,\n c.SCALE_KERNEL_SIZES_D,\n c.SCALE_FC_LAYER_SIZES_D)\n\n print 'Init generator...'\n self.g_model = GeneratorModel(self.sess,\n self.summary_writer,\n c.TRAIN_HEIGHT,\n c.TRAIN_WIDTH,\n c.FULL_HEIGHT,\n c.FULL_WIDTH,\n c.SCALE_FMS_G,\n c.SCALE_KERNEL_SIZES_G)\n\n print 'Init variables...'\n self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)\n self.sess.run(tf.global_variables_initializer())\n\n # if load path specified, load a saved model\n if model_load_path is not None:\n self.saver.restore(self.sess, model_load_path)\n print 'Model restored from ' + model_load_path", "def __init__(self):\n self.sess = tf.Session()\n vocab_path = os.path.join(params.data_dir, \"vocab%d\" % params.vocab_size)\n self.vocab, self.rev_vocab = data_utils.initialize_vocabulary(vocab_path)\n self.model = model_utils.create_model(self.sess, True)\n self.model.batch_size = 1 # Respond 1 sentence at a time.", "def init_resnet(num_classes: int) -> nn.Module:\n model = models.resnet50(pretrained=True)\n num_features = model.fc.in_features\n model.fc = nn.Linear(num_features, num_classes)\n\n return model", "def setup(self, params, training=True, **kwargs):\n\n tf.reset_default_graph()\n\n return super().setup(params=params, training=training, **kwargs)", "def _init_model(self, forrest):\n rels = self.get_rels(forrest)\n self._model = RDPModel(rels)", "def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.update_lr = FLAGS.update_lr\n self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())\n self.auto_lr = tf.placeholder_with_default(FLAGS.auto_lr, ())\n \n self.classification = False\n self.test_num_updates = test_num_updates\n self.dim_auto = 2 #This should be able to be arbitrary\n if auto:\n self.real_input = 39 # This is square root of the total (its a kernel)\n #self.real_output = 40#self.dim_output\n self.real_output = 39*39 # This should be the complete dimension out. \n self.dim_input = 3*self.dim_auto #= 3*self.dim_auto \n self.dim_output = self.dim_auto\n #This is from each. 
\n #if auto: self.dim_input, self.dim_output = self.dim_auto, self.dim_auto #If auto, pass in/out the dimension of the latent (auto_\n if FLAGS.datasource == 'sinusoid':\n self.dim_hidden = [40, 40,40]\n self.loss_func = mse\n self.forward = self.forward_fc\n self.construct_weights = self.construct_fc_weights\n elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':\n self.loss_func = xent\n self.classification = True\n if FLAGS.conv:\n self.dim_hidden = FLAGS.num_filters\n self.forward = self.forward_conv\n self.construct_weights = self.construct_conv_weights\n else:\n self.dim_hidden = [256, 128, 64, 64]\n self.forward=self.forward_fc\n self.construct_weights = self.construct_fc_weights\n if FLAGS.datasource == 'miniimagenet':\n self.channels = 3\n else:\n self.channels = 1\n self.img_size = int(np.sqrt(self.dim_input/self.channels))\n else:\n raise ValueError('Unrecognized data source.')", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def __init__(self, params=None):\n if isinstance(params, SSDParams):\n self.params = params\n else:\n self.params = SSDNet.default_params\n # if cfgs.DATA_FORMAT == \"NHWC\":\n # self.images_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, None, None, 3],\n # name=\"input_images\")\n # else:\n # self.images_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 3, None, None],\n # name=\"input_images\")\n\n # self.labels_batch = tf.placeholder(dtype=tf.int32, shape=[None, None, cfgs.NUM_CLASS+1], name=\"gt_labels\")\n # self.bboxes_batch = tf.placeholder(dtype=tf.float32, shape=(None, None, None, 4), name=\"gt_bboxes\")\n # self.scores_batch = tf.placeholder(dtype=tf.float32, shape=(None, None, 1), name=\"gt_scores\")\n\n self.global_step = tf.train.get_or_create_global_step()", "def __init__(self, saved_model_path, pipeline_config_path):\r\n\r\n saved_model_path = os.path.realpath(saved_model_path)\r\n assert os.path.exists(saved_model_path)\r\n\r\n # Use tf2onnx to convert saved model to an initial ONNX graph.\r\n graph_def, inputs, outputs = tf_loader.from_saved_model(saved_model_path, None, None, \"serve\",\r\n [\"serving_default\"])\r\n log.info(\"Loaded saved model from {}\".format(saved_model_path))\r\n with tf.Graph().as_default() as tf_graph:\r\n tf.import_graph_def(graph_def, name=\"\")\r\n with tf_loader.tf_session(graph=tf_graph):\r\n onnx_graph = tfonnx.process_tf_graph(tf_graph, input_names=inputs, output_names=outputs, opset=11)\r\n onnx_model = optimizer.optimize_graph(onnx_graph).make_model(\"Converted from {}\".format(saved_model_path))\r\n self.graph = gs.import_onnx(onnx_model)\r\n assert self.graph\r\n log.info(\"TF2ONNX graph created successfully\")\r\n\r\n # Fold constants via ONNX-GS that TF2ONNX may have missed.\r\n self.graph.fold_constants()\r\n \r\n # Pipeline config parsing.\r\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\r\n with tf.io.gfile.GFile(pipeline_config_path, 'r') as f:\r\n text_format.Merge(f.read(), pipeline_config)\r\n\r\n # If your model is SSD, get characteristics accordingly from pipeline.config file.\r\n if pipeline_config.model.HasField(\"ssd\"):\r\n # Getting model characteristics.\r\n self.model = str(pipeline_config.model.ssd.feature_extractor.type)\r\n self.height = int(pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height)\r\n self.width = 
int(pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width)\r\n self.first_stage_nms_score_threshold = float(pipeline_config.model.ssd.post_processing.batch_non_max_suppression.score_threshold)\r\n self.first_stage_nms_iou_threshold = float(pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold)\r\n self.first_stage_max_proposals = int(pipeline_config.model.ssd.post_processing.batch_non_max_suppression.max_detections_per_class)\r\n\r\n # If your model is Faster R-CNN get it's characteristics from pipeline.config file.\r\n elif pipeline_config.model.HasField(\"faster_rcnn\"): \r\n self.model = str(pipeline_config.model.faster_rcnn.feature_extractor.type) \r\n # There are two types of image_resizers, select accordingly from pipeline.config file.\r\n if pipeline_config.model.faster_rcnn.image_resizer.HasField(\"fixed_shape_resizer\"):\r\n self.height = int(pipeline_config.model.faster_rcnn.image_resizer.fixed_shape_resizer.height)\r\n self.width = int(pipeline_config.model.faster_rcnn.image_resizer.fixed_shape_resizer.width)\r\n elif pipeline_config.model.faster_rcnn.image_resizer.HasField(\"keep_aspect_ratio_resizer\"): \r\n self.height = int(pipeline_config.model.faster_rcnn.image_resizer.keep_aspect_ratio_resizer.max_dimension)\r\n self.width = self.height\r\n else:\r\n log.info(\"Image resizer config is not supported\")\r\n sys.exit(1)\r\n\r\n # Getting model characteristics\r\n self.first_stage_nms_score_threshold = float(pipeline_config.model.faster_rcnn.first_stage_nms_score_threshold) \r\n self.first_stage_nms_iou_threshold = float(pipeline_config.model.faster_rcnn.first_stage_nms_iou_threshold)\r\n self.first_stage_max_proposals = int(pipeline_config.model.faster_rcnn.first_stage_max_proposals)\r\n self.initial_crop_size = int(pipeline_config.model.faster_rcnn.initial_crop_size)\r\n self.second_score_threshold = float(pipeline_config.model.faster_rcnn.second_stage_post_processing.batch_non_max_suppression.score_threshold)\r\n self.second_iou_threshold = float(pipeline_config.model.faster_rcnn.second_stage_post_processing.batch_non_max_suppression.iou_threshold)\r\n\r\n else: \r\n log.info(\"Given pipeline.config file is not supported\")\r\n sys.exit(1)\r\n\r\n #print(self.model)\r\n #print(self.height)\r\n #print(self.width)\r\n #print(self.first_stage_nms_score_threshold)\r\n #print(self.first_stage_nms_iou_threshold)\r\n #print(self.first_stage_max_proposals)\r\n #print(self.initial_crop_size)\r\n #print(self.second_score_threshold)\r\n #print(self.second_iou_threshold)\r\n #print(self.first_stage_max_proposals)\r\n\r\n self.batch_size = None", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n\n self._add_placeholders()\n\n with tf.device(\"/gpu:%d\"%(config.gpu_selection)):\n self._add_seq2seq()\n\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n\n\n if self._mode == 'train':\n self._add_train_op()\n\n self._summaries = tf.summary.merge_all()\n\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)", "def create(self):\n # 1st Layer: Conv -> norm -> ReLu\n conv1 = self.conv(x=self.X, stride_y=1, stride_x=1, padding='SAME', name='conv1')\n norm1 = lrn(conv1, 2, 1e-04, 0.75, name='norm1')\n # Apply relu function\n relu1 = tf.nn.relu(norm1)\n\n # 2st Layer: Conv -> norm -> ReLu\n conv2 = self.conv(x=relu1, stride_y=1, stride_x=1, padding='SAME', name='conv2')\n norm2 = lrn(conv2, 2, 1e-04, 0.75, name='norm2')\n # Apply relu function\n relu2 = 
tf.nn.relu(norm2)\n\n pool2 = tf.nn.max_pool(relu2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # 3st Layer: Conv -> norm -> ReLu\n conv3 = self.conv(x=pool2, stride_y=1, stride_x=1, padding='SAME', name='conv3')\n norm3 = lrn(conv3, 2, 1e-04, 0.75, name='norm3')\n # Apply relu function\n relu3 = tf.nn.relu(norm3)\n\n # 4st Layer: Conv -> norm -> ReLu\n conv4 = self.conv(x=relu3, stride_y=1, stride_x=1, padding='SAME', name='conv4')\n norm4 = lrn(conv4, 2, 1e-04, 0.75, name='norm4')\n # Apply relu function\n relu4 = tf.nn.relu(norm4)\n\n pool4 = tf.nn.max_pool(relu4, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # 5st Layer: Conv -> norm -> ReLu\n conv5 = self.conv(x=pool4, stride_y=1, stride_x=1, padding='SAME', name='conv5')\n norm5 = lrn(conv5, 2, 1e-04, 0.75, name='norm5')\n # Apply relu function\n relu5 = tf.nn.relu(norm5)\n\n # 6st Layer: Conv -> norm -> ReLu\n conv6 = self.conv(x=relu5, stride_y=1, stride_x=1, padding='SAME', name='conv6')\n norm6 = lrn(conv6, 2, 1e-04, 0.75, name='norm6')\n # Apply relu function\n relu6 = tf.nn.relu(norm6)\n\n pool6 = tf.nn.avg_pool(relu6, ksize=[1, 4, 4, 1],\n strides=[1, 4, 4, 1],\n padding='SAME')\n\n flattened = tf.reshape(pool6, [-1, 128 * 4])\n self.fc7 = self.fc(flattened, name='fc7')", "def _setup_init(self):\n with tf.variable_scope(\"output\", reuse=True):\n assert self.q_values is not None\n self.policy_proba = tf.nn.softmax(self.q_values)", "def build_inference_graph(self):\n self.build_train_graph()", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def __init__(self, **kwargs):\n super().__init__()\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)", "def build_graph(self, save_model_path):\n if os.path.exists(\"{}.meta\".format(save_model_path)):\n logger.info(\"Graph existed, ready to be reloaded...\")\n else:\n logger.info(\"No graph can be loaded, so create a new graph...\")\n tf.reset_default_graph()\n # placeholders\n x = self.neural_net_image_input((32, 32, 3))\n y = self.neural_net_label_input(10)\n keep_prob = self.neural_net_keep_prob_input()\n\n # model\n logits_out = self.conv_net(x, keep_prob)\n\n # Name logits_out\n logits_out = tf.identity(logits_out, name='logits')\n\n # loss and optimizer\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_out, labels=y), name='cost')\n optimzer = tf.train.AdamOptimizer(name='optimizer').minimize(loss)\n\n # Accuracy\n correct_pred = tf.equal(tf.argmax(y, axis=1), tf.argmax(logits_out, axis=1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\n # print(type(tf.Variable(1)))\n saver = tf.train.Saver()\n if not os.path.exists('./savedModel'):\n os.mkdir('./savedModel')\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.save(sess, './savedModel/cnn-model')", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 
64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def model_initializer():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten())\n # model.add(tf.keras.layers.Dense(128, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(64, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(32, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n\n model.compile(optimizer='rmsprop',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model", "def reset(self):\n self.close()\n self._sess = tf.Session(graph=self._graph)\n self._sess.run(self._initializer)", "def _init_tf(self):\n assert not self.tf_init_done, \\\n \"this class is not designed to be initialised twice\"\n self.sess = tf.get_default_session()\n self.optimiser = tf.train.AdamOptimizer(learning_rate=self.lr)\n\n # maps problem names to (obs var, q-value var) tuples\n self.obs_qv_inputs = {}\n losses = []\n loss_parts = None\n batch_sizes = []\n for problem in self.problems:\n this_obs_var, this_q_values, this_loss, this_loss_parts \\\n = self._instantiate_net(problem)\n self.obs_qv_inputs[problem.name] = (this_obs_var, this_q_values)\n this_batch_size = tf.shape(this_obs_var)[0]\n losses.append(this_loss)\n batch_sizes.append(tf.cast(this_batch_size, tf.float32))\n if loss_parts is None:\n loss_parts = this_loss_parts\n else:\n # we care about these parts because we want to display them to\n # the user (e.g. 
how much of my loss is L2 regularisation\n # loss?)\n assert len(loss_parts) == len(this_loss_parts), \\\n 'diff. loss breakdown for diff. probs. (%s vs %s)' \\\n % (loss_parts, this_loss_parts)\n # sum up all the parts\n new_loss_parts = []\n for old_part, new_part in zip(loss_parts, this_loss_parts):\n assert old_part[0] == new_part[0], \\\n \"names (%s vs. %s) don't match\" % (old_part[0],\n new_part[0])\n to_add = new_part[1] * tf.cast(this_batch_size, tf.float32)\n new_loss_parts.append((old_part[0], old_part[1] + to_add))\n loss_parts = new_loss_parts\n self.op_loss \\\n = sum(l * s for l, s in zip(losses, batch_sizes)) \\\n / sum(batch_sizes)\n # this is actually a list of (name, symbolic representation) pairs for\n # components of the loss\n self.loss_part_ops = [(name, value / sum(batch_sizes))\n for name, value in loss_parts]\n\n # Next bit hairy because we want combined grads (and also want to split\n # them out for TensorBoard to look at). Really this is similar to\n # self.op_train = self.optimiser.minimize(loss).\n params = self.weight_manager.all_weights\n # do a check that set(params) is the same as\n param_set = set(params)\n for problem in self.problems:\n their_param_set = set(problem.policy.get_params(trainable=True))\n assert their_param_set == param_set, \\\n \"policy for %s has weird params\" % problem.name\n\n grads_and_vars = self.optimiser.compute_gradients(\n self.op_loss, var_list=params)\n # see https://stackoverflow.com/a/43486487 for gradient clipping\n gradients, variables = zip(*grads_and_vars)\n gradients = list(gradients)\n # for grad, var in grads_and_vars:\n # gradients[0] = tf.Print(gradients[0], [tf.norm(grad), tf.norm(var)], 'grad/var norm for %s:' % var.name)\n grads_and_vars = zip(gradients, variables)\n self.op_train = self.optimiser.apply_gradients(\n grads_and_vars=grads_and_vars)\n for g, v in grads_and_vars:\n tf.summary.histogram(\n 'weight-grads/' + v.name, g, collections=['sl-hists'])\n for slot in self.optimiser.get_slot_names():\n slot_var = self.optimiser.get_slot(v, slot)\n if slot_var is not None:\n dest_name = 'slots-' + slot + '/' + v.name\n tf.summary.histogram(\n dest_name, slot_var, collections=['sl-hists'])\n\n # \"weights\" is probably set by some code somewhere deep in RLLab\n # TODO: this is probably not the best idea. Maybe do weight hist stuff\n # *here*?\n weight_op = tf.summary.merge_all('weights')\n # 'summaries_f_prob' (for activations) is set up in\n # CategoricalMLPPolicy.__init__. 
Again I stuck it deep in RLLab because\n # I'm an idiot.\n act_op = tf.summary.merge_all('sl-activations')\n tf.summary.merge([act_op, weight_op], collections=['sl-hists'])\n self.op_summary = tf.summary.merge_all('sl-hists')\n\n # tensorboard ops\n self._log_ops = {}\n\n self.sess.run(tf.global_variables_initializer())\n\n self.tf_init_done = True", "def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def __init__(self, sess, save_folder, file_name, **kwargs):\n\n tf.logging.info('Building graph for low dimensional score metric')\n self._build_graph(**kwargs)\n\n self.build_summaries()\n tf.logging.info('Summary operator made')\n\n self.sess = sess\n self.initialize_model(save_folder, file_name, sess)\n tf.logging.info('Model initialized')", "def build_graph():\n os.environ['CUDA_VISIBLE_DEVICES']= '0'\n\n # frozen_model = '/home/kevin/Codes/DeepNet/log/20180419_221132/frozen_model.pb'\n # frozen_model = '/home/kevin/Downloads/deeplabv3_cityscapes_train/frozen_inference_graph.pb'\n # frozen_model = '/home/kevin/Codes/EnvNet/RUNS/used3/frozen_model.pb'\n frozen_model = '/home/kevin/Codes/DeepNet/log/20180716_212035/frozen_model1.pb'\n graph = load_graph(frozen_model)\n\n for op in graph.get_operations():\n print(op.name)\n\n ## model_envnet/frozen_model.pb\n image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n ## model_deeplab/frozen_inference_graph.pb\n # image_pl = graph.get_tensor_by_name('ImageTensor:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n # ## model_deepnet/frozen_model.pb\n # image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n config = tf.ConfigProto() \n config.gpu_options.per_process_gpu_memory_fraction = 0.5\n sess = tf.Session(graph=graph,config=config)\n\n return image_pl, pred_seg, sess", "def _initialize_local_and_global_variables(self):\n variables_initialization_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n self.sess.run(variables_initialization_op)", "def __init__(self, MY_GRAPH_PATH):\n self.graph = tf.Graph()\n\n graph_def = None\n with tf.gfile.FastGFile(MY_GRAPH_PATH, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n if graph_def is None:\n raise RuntimeError('Cannot find inference graph in tar archive.')\n\n with self.graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n self.sess = tf.Session(graph=self.graph)", "def build_tf_graph(self):\n raise NotImplementedError", "def compile(self, seed=42):\n ops.reset_default_graph()\n self._log_params() # Small trick to get all the variables and log them\n # Create the graph object\n with tf.device(\"/gpu:0\"):\n logger.info(\"Building graph...\")\n tf.set_random_seed(seed)\n self.global_step = 
tf.get_variable(name=\"global_step\",\n shape=[],\n dtype='int32',\n initializer=tf.constant_initializer(0),\n trainable=False)\n self._create_placeholders()\n self._setup_graph_def()\n\n self._add_scalar_summary(self.loss)\n if self.eval_metric is not None:\n self._add_scalar_summary(self.eval_metric)\n self._is_graph_build = True", "def __init__(self, state_size, action_size, scope='global', layer_size=np.array([400, 300])):\n self.state_size = state_size\n self.action_size = action_size\n self.scope = scope\n with tf.variable_scope(scope):\n self.inputs = tf.placeholder(shape=[None, state_size], dtype=tf.float32)\n self.layers = [self.inputs]\n for i in range(len(layer_size)):\n self.layers.append(slim.fully_connected(self.layers[i], int(layer_size[i]), activation_fn=tf.nn.relu))\n\n self.policyLayer = slim.fully_connected(self.layers[-1], action_size, activation_fn=tf.nn.tanh)\n # Get the index of the highest output from the neural network\n self.maxOutputNode = tf.argmax(self.policyLayer, 1)", "def __init__(self, model='facenet-20180402-114759.pb'):\n print('Load Frozen Graph')\n\n with tf.gfile.FastGFile(os.path.join(os.path.dirname(__file__), \"weights\", model),\n 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n self.graph = tf.get_default_graph()\n\n print('Ended loading frozen graph')", "def __init__(self, model_path, gpu_fraction=1.0,\n input_name = 'input_1:0',\n output_name = 'output_node0:0',\n optimize = True,\n optimizer_args = None):\n\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n\n if optimize:\n if type(output_name) == list:\n sensitive_nodes = output_name\n else:\n sensitive_nodes = [output_name]\n graph_def = optimizeGraph(graph_def,\n sensitive_nodes,\n optimizer_args)\n if type(output_name) == list:\n return_elements = [input_name, *output_name]\n tensors = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n # The first is an input\n self.input_tensor = tensors[0]\n # The rest are outputs\n self.output_tensor = tensors[1:]\n else:\n return_elements = [input_name, output_name]\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n\n self.input_shape = self.input_tensor.get_shape().as_list()", "def fit_eval(self, sess):\n tfconfig = tf.ConfigProto()\n tfconfig.gpu_options.allow_growth = True\n self.sess = tf.Session(config=tfconfig)\n self.sess.run(tf.global_variables_initializer())\n self.new_saver=tf.train.import_meta_graph(self.meta_graph_path)\n self.new_saver.restore(sess,self.model_path)\n #graph = tf.get_default_graph()\n self.X_inputs=tf.get_collection(\"model.X_inputs\")[0]\n self.y_inputs=tf.get_collection(\"model.y_inputs\")[0]\n self.y_pred_meta=tf.get_collection(\"model.y_pred\")[0]\n self.lr=tf.get_collection(\"lr\")[0]\n self.batch_size=tf.get_collection(\"batch_size\")[0]\n self.keep_prob=tf.get_collection(\"keep_prob\")[0]\n self.attention=tf.get_collection(\"attention\")[0]\n self.correct_prediction_bilstm= tf.equal(tf.cast(tf.argmax(self.attention, 1), tf.int32), tf.reshape(self.y_inputs, 
[-1]))\n self.correct_prediction_attention = tf.equal(tf.cast(tf.argmax(self.y_pred_meta, 1), tf.int32), tf.reshape(self.y_inputs, [-1]))\n self.accuracy_attention = tf.reduce_mean(tf.cast(self.correct_prediction_attention, tf.float32))\n self.accuracy_bilstm = tf.reduce_mean(tf.cast(self.correct_prediction_bilstm, tf.float32))\n saver = tf.train.Saver(max_to_keep=3)\n saver.restore(sess, tf.train.latest_checkpoint(self.model.checkpoint_path))\n X_batch, y_batch = self.batch_gen.__next__()\n test_fetches = [self.attention, self.accuracy_attention, self.accuracy_bilstm, self.y_pred_meta]\n feed_dict = {self.X_inputs:X_batch, self.y_inputs:y_batch, self.lr:self._lr, self.batch_size:10, self.keep_prob:1.0}\n _att_pred, _att_acc, _bilstm_acc , _bilstm_pred = sess.run(test_fetches, feed_dict)\n print(_att_pred,_bilstm_pred, _att_acc, _bilstm_acc)\n return _att_pred,_bilstm_pred, _att_acc, _bilstm_acc", "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def _build_model(self):\n\n with tf.variable_scope(\"Matchnet\", reuse=tf.AUTO_REUSE):\n # For determining the runtime shape\n x_shp = tf.shape(self.x_in)\n\n # -------------------- Network archintecture --------------------\n # Build graph\n print(\"Building Graph\")\n self.logits = build_graph(self.x_in, self.is_training, self.config)\n # ---------------------------------------------------------------\n\n # Turn into weights for each sample\n weights = tf.nn.relu(tf.tanh(self.logits))\n\n # Make input data (num_img_pair x num_corr x 4)\n xx = tf.transpose(tf.reshape(\n self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))\n\n # Create the matrix to be used for the eight-point algorithm\n X = tf.transpose(tf.stack([\n xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],\n xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],\n xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])\n ], axis=1), (0, 2, 1))\n print(\"X shape = {}\".format(X.shape))\n wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X\n print(\"wX shape = {}\".format(wX.shape))\n XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)\n print(\"XwX shape = {}\".format(XwX.shape))\n\n # Recover essential matrix from self-adjoing eigen\n e, v = tf.self_adjoint_eig(XwX)\n self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))\n # Make unit norm just in case\n self.e_hat /= tf.norm(self.e_hat, axis=1, keep_dims=True)", "def build_graph(self):\n train_graph = tf.Graph()\n opts = self._options\n with train_graph.as_default():\n self.__inputs, self.__doc_inputs, self.__labels, self.__lr = self._get_inputs()\n embed, word_embeddings, combined_embed_vector_length = self._get_embedding_layer(\n self.__inputs, self.__doc_inputs)\n\n norm_w = tf.sqrt(tf.reduce_sum(tf.square(word_embeddings), 1, keep_dims=True))\n self.__normalized_word_embeddings = word_embeddings / norm_w\n\n weights = tf.Variable(\n tf.truncated_normal((self.vocab_size, combined_embed_vector_length),\n stddev=1.0 / math.sqrt(combined_embed_vector_length))\n )\n biases = tf.Variable(tf.zeros(self.vocab_size))\n\n if opts.loss == 'softmax':\n loss = tf.nn.sampled_softmax_loss(weights=weights,\n biases=biases,\n labels=self.__labels,\n inputs=embed,\n num_sampled=opts.negative_sample_size,\n num_classes=opts.vocab_size)\n tf.summary.scalar(\"Softmax loss\", loss)\n else:\n loss = tf.nn.nce_loss(weights=weights,\n biases=biases,\n labels=self.__labels,\n 
inputs=embed,\n num_sampled=opts.negative_sample_size,\n num_classes=opts.vocab_size)\n tf.summary.scalar(\"NCE loss\", loss)\n\n self.__cost = tf.reduce_mean(loss)\n\n if opts.train_method == 'Adam':\n self.__optimizer = tf.train.AdamOptimizer(self.__lr).minimize(self.__cost)\n else:\n self.__optimizer = tf.train.GradientDescentOptimizer(self.__lr).minimize(self.__cost)\n\n self.__summary = tf.summary.merge_all()\n\n self._session = tf.Session(graph=train_graph)\n self.saver = tf.train.Saver()\n return self", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def __init__(self):\n self._graph = tf.Graph()\n self._session = tf.compat.v1.Session(graph=self._graph)\n\n # This lock is for multi-threaded contexts where multiple threads\n # share the same EvalSavedModel.\n #\n # Locking is required in the case where there are multiple threads using\n # the same EvalMetricsGraph. 
Because the metrics variables are part of the\n # session, and all threads share the same session, without a lock, the\n # \"reset-update-get\" steps may not be atomic and there can be races.\n #\n # Having each thread have its own session would also work, but would\n # require a bigger refactor.\n # TODO(b/131727905): Investigate whether it's possible / better to have\n # each thread have its own session.\n self._lock = threading.Lock()\n\n # Variables that need to be populated.\n\n # The names of the metric.\n self._metric_names = []\n\n # Ops associated with reading and writing the metric variables.\n self._metric_value_ops = []\n self._metric_update_ops = []\n self._metric_variable_assign_ops = []\n\n # Nodes associated with the metric variables.\n self._metric_variable_nodes = []\n\n # Placeholders and feed input for the metric variables.\n self._metric_variable_placeholders = []\n self._perform_metrics_update_fn_feed_list = []\n self._perform_metrics_update_fn_feed_list_keys = []\n\n # OrderedDicts that map features, predictions, and labels keys to their\n # tensors.\n self._features_map = {}\n self._predictions_map = {}\n self._labels_map = {}\n\n # Ops to set/update/reset all metric variables.\n self._all_metric_variable_assign_ops = None\n self._all_metric_update_ops = None\n self._reset_variables_op = None\n\n # Callable to perform metric update.\n self._perform_metrics_update_fn = None\n\n # OrderedDict produced by graph_ref's load_(legacy_)inputs, mapping input\n # key to tensor value.\n self._input_map = None\n\n self._batch_size = (\n beam.metrics.Metrics.distribution(constants.METRICS_NAMESPACE,\n 'batch_size'))\n self._batch_size_failed = (\n beam.metrics.Metrics.distribution(constants.METRICS_NAMESPACE,\n 'batch_size_failed'))\n\n try:\n self._construct_graph()\n except (RuntimeError, TypeError, ValueError,\n tf.errors.OpError) as exception:\n general_util.reraise_augmented(exception, 'Failed to create graph.')", "def train_on_one_batch(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n with tf.device('/gpu:0'):\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess: #config=tf.ConfigProto(log_device_placement=True)\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 2), save_model_path)\n\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()", "def load_resnet(self, resnet_dir, keep_last=False):\n ckpt = tf.train.latest_checkpoint(resnet_dir)\n with tf.Session(config=self.config) as sess:\n # init model\n init = [tf.global_variables_initializer(), tf.local_variables_initializer()]\n sess.run(init)\n if keep_last:\n restore_var = [v for v in tf.global_variables() if 'global_step' not in v.name and 'mode' not in v.name]\n else:\n restore_var = [v for v in tf.global_variables() if 'global_step' not in v.name and 'mode' not in v.name\n and 'conv6' not in v.name]\n loader = tf.train.Saver(var_list=restore_var)\n # load model\n self.load(ckpt, sess, loader)", "def __init__(self, img_rows=400, img_cols=400, vgg_weights=\"imagenet\", inference_only=False, net_name='default', gpus=1, vgg_device=None):\n \n # Settings\n self.img_rows = img_rows\n self.img_cols = img_cols\n self.img_overlap = 30\n self.inference_only = inference_only\n self.net_name = net_name\n self.gpus = gpus\n self.vgg_device = vgg_device\n\n # Scaling for VGG input\n 
self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n \n #get PowerSpect_CMB\n reader = np.zeros((2507,))\n fp = open('./data/COM_PowerSpect_CMB-base-plikHM-TTTEEE-lowl-lowE-lensing-minimum-theory_R3.01.txt')\n \n for i,line in enumerate(fp):\n if i >= 1:\n reader[i-1] = line.split()[1]\n \n fp.close() \n readers = np.log(reader)\n self.cl = K.constant(readers)\n # Assertions\n assert self.img_rows >= 256, 'Height must be >256 pixels'\n assert self.img_cols >= 256, 'Width must be >256 pixels'\n\n # Set current epoch\n self.current_epoch = 0\n \n # VGG layers to extract features from (first maxpooling layers, see pp. 7 of paper)\n self.vgg_layers = [3, 6, 10]\n\n # Instantiate the vgg network\n if self.vgg_device:\n with tf.device(self.vgg_device):\n self.vgg = self.build_vgg(vgg_weights)\n else:\n self.vgg = self.build_vgg(vgg_weights)\n \n # Create UNet-like model\n if self.gpus <= 1:\n self.model, inputs_mask= self.build_pconv_unet()\n self.compile_pconv_unet(self.model, inputs_mask) \n else:\n with tf.device(\"/cpu:0\"):\n self.model, inputs_mask = self.build_pconv_unet()\n self.model = multi_gpu_model(self.model, gpus=self.gpus)\n self.compile_pconv_unet(self.model, inputs_mask)", "def __init__(self, model):\r\n self._tensorflow_session = model._tensorflow_session\r\n self._model = model", "def build_resnet101(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 4):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b3_feats = temp\n \n res4a_feats = self.basic_block(res3b3_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 23):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b22_feats = temp\n\n res5a_feats = self.basic_block(res4b22_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def _setup(self):\n\n # caffe-tensorflow/convert.py can only run with Python2. 
Since the default encoding format of Python2 is ASCII\n # but the default encoding format of Python3 is UTF-8, it will raise an error without 'encoding=\"latin1\"'\n weight_dict = np.load(self.vgg16_path, encoding=\"latin1\").item()\n\n scopes = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3',\n 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3']\n for scope in scopes:\n with tf.variable_scope(scope.split('_')[0] + '/' + scope, reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w_init_op = weights.assign(weight_dict[scope]['weights'])\n b_init_op = biases.assign(weight_dict[scope]['biases'])\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)\n\n with tf.variable_scope('fc6', reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w = weight_dict['fc6']['weights']\n b = weight_dict['fc6']['biases']\n w = np.reshape(w, (7, 7, 512, 4096))\n w = w[0:-1:2, 0:-1:2, :, 0:-1:4]\n b = b[0:-1:4]\n w_init_op = weights.assign(w)\n b_init_op = biases.assign(b)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)\n\n with tf.variable_scope('fc7', reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w = weight_dict['fc7']['weights']\n b = weight_dict['fc7']['biases']\n w = np.reshape(w, (1, 1, 4096, 4096))\n w = w[:, :, 0:-1:4, 0:-1:4]\n b = b[0:-1:4]\n w_init_op = weights.assign(w)\n b_init_op = biases.assign(b)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)", "def __init__(self, in_seq_length, out_seq_length, hidden_dim,\n n_epochs=1500, learning_rate=0.0001,\n save_file='./forecastnet.ckpt', model='dense'):\n # Initialize variables passed\n self.in_seq_length =in_seq_length\n self.out_seq_length = out_seq_length\n self.hidden_dim = hidden_dim\n self.n_epochs = n_epochs\n self.learning_rate = learning_rate\n self.save_file = save_file\n self.model = model\n\n # Reset the default graph\n tf.reset_default_graph()\n\n # Set random seed to keep consistent results\n # tf.set_random_seed(1)\n\n # Create the placeholders for the TensorFlow graph\n self.X, self.Y, self.is_training = self.create_placeholders()\n\n # Build the TensorFlow graph\n self.build_graph()\n\n # Define the tensorflow optimizer. 
Use an AdamOptimizer.\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n\n # Print the number of trainable parameters of the model\n print('Trainable variables = ', np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))\n print('')", "def build_graph(self):\n\n\n\n self.inputs.append( #uint8\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='input/lr')) \n\n self.label.append(\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='label/hr'))", "def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, 
weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def _build_graph(self, inputs):\n\n # inputs contains a list of input variables defined above\n input_from_sensor1, input_from_sensor2, label = inputs\n print \"ok\"\n print input_from_sensor1\n # In tensorflow, inputs to convolution function are assumed to be\n # NHWC. Add a single channel here.\n #image = tf.expand_dims(image, 3)\n\n #image = image * 2 - 1 # center the pixels values at zero\n # The context manager `argscope` sets the default option for all the layers under\n # this context. 
Here we use 32 channel convolution with shape 3x3\n\n sensor1 = Sequential('sensor1', input_from_sensor1) \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)() \n\n print sensor1\n\n sensor2 = Sequential('sensor2', input_from_sensor2) \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)()\n\n output = Connect('cloud', [sensor1, sensor2], \"inner_product\") \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)()\n\n tf.nn.softmax(output, name='prob') # a Bx10 with probabilities\n\n #g = tf.get_default_graph()\n #for v in g.as_graph_def().node:\n # print v.name\n\n # a vector of length B with loss of each sample\n cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=label)\n cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss\n\n correct = tf.cast(tf.nn.in_top_k(output, label, 1), tf.float32, name='correct')\n accuracy = tf.reduce_mean(correct, name='accuracy')\n\n # This will monitor training error (in a moving_average fashion):\n # 1. write the value to tensosrboard\n # 2. write the value to stat.json\n # 3. print the value after each epoch\n train_error = tf.reduce_mean(1 - correct, name='train_error')\n summary.add_moving_summary(train_error, accuracy)\n\n # Use a regex to find parameters to apply weight decay.\n # Here we apply a weight decay on all W (weight matrix) of all fc layers\n wd_cost = tf.multiply(1e-5,\n regularize_cost('fc.*/W', tf.nn.l2_loss),\n name='regularize_loss')\n\n self.cost = tf.add_n([wd_cost, cost], name='total_cost')\n\n summary.add_moving_summary(cost, wd_cost, self.cost)\n\n # monitor histogram of all weight (of conv and fc layers) in tensorboard\n summary.add_param_summary(('.*/W', ['histogram', 'rms']))" ]
[ "0.71776015", "0.6739433", "0.6669668", "0.660558", "0.65882456", "0.6516973", "0.6443085", "0.6423372", "0.64015436", "0.63999057", "0.6339511", "0.63393587", "0.6328483", "0.631329", "0.63033473", "0.62763274", "0.62757164", "0.627243", "0.6259759", "0.6259759", "0.6252365", "0.6238946", "0.6238248", "0.6228969", "0.62260884", "0.62216014", "0.61992395", "0.6188734", "0.6182232", "0.6179242", "0.6167892", "0.6159257", "0.6145693", "0.61373615", "0.61370456", "0.6135909", "0.61055654", "0.6098847", "0.60771847", "0.6072279", "0.6068658", "0.6060275", "0.60361105", "0.60349673", "0.6022777", "0.59995234", "0.5975665", "0.59745455", "0.59699357", "0.59679097", "0.59655076", "0.5944577", "0.59443724", "0.5942477", "0.5916095", "0.5910781", "0.5903678", "0.59029704", "0.5899744", "0.5894725", "0.5892079", "0.58791643", "0.5875119", "0.5865222", "0.585884", "0.58562565", "0.5853644", "0.58522123", "0.5844606", "0.5841884", "0.5839001", "0.5832439", "0.5830157", "0.582558", "0.58121085", "0.58112323", "0.5808803", "0.5806357", "0.580387", "0.57945246", "0.5793703", "0.5792321", "0.5785907", "0.57851183", "0.57754564", "0.57655543", "0.57646304", "0.57595575", "0.57500404", "0.57434803", "0.574348", "0.57424086", "0.5735521", "0.57310385", "0.5730755", "0.57296336", "0.5727137", "0.57193434", "0.57140994", "0.57094336", "0.570941" ]
0.0
-1
Load weights from a checkpoint file into the TensorFlow graph.
def load_weights(self, checkpoint_path, sess=None):
    if sess is None:
        sess = tf.get_default_session()
    assert sess is not None
    saver = tf.train.Saver(self.variables_to_restore)
    saver.restore(sess, checkpoint_path)
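A minimal usage sketch for a loader like the one above, assuming a TF1-style API. Only the `load_weights` body and the `variables_to_restore` attribute mirror the record; the `Model` class, its tiny placeholder graph, and the /tmp checkpoint path are invented purely for illustration.

import tensorflow as tf

class Model:
    def __init__(self):
        # Hypothetical graph; shapes and names are illustrative only.
        self.x = tf.placeholder(tf.float32, [None, 4], name="x")
        self.logits = tf.layers.dense(self.x, 2, name="head")
        self.variables_to_restore = tf.global_variables()

    def load_weights(self, checkpoint_path, sess=None):
        # Same logic as the record above: restore the listed variables
        # from the checkpoint into the current graph.
        if sess is None:
            sess = tf.get_default_session()
        assert sess is not None
        saver = tf.train.Saver(self.variables_to_restore)
        saver.restore(sess, checkpoint_path)

model = Model()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ckpt_path = saver.save(sess, "/tmp/demo_model.ckpt")  # write a checkpoint first
    model.load_weights(ckpt_path, sess=sess)              # then load it back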
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)", "def load_weights(self, path=None):\n\n if path is None:\n path = self.checkpoints_dir\n\n self.model.load_weights(tf.train.latest_checkpoint(path))\n logging.info(f'\\tWeights loaded from {path}')", "def load_weights(self, model_name: str, checkpoint: int, path: str = './models/'):\n path_to_model = path + model_name + '/checkpoint_' + str(checkpoint) + '/model_weights'\n self.model.load_weights(path_to_model)", "def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])", "def _load_checkpoint_to_net(config, network):\n if config.existed_ckpt:\n if config.existed_ckpt.endswith(\".npz\"):\n weights = np.load(config.existed_ckpt)\n else:\n weights = load_checkpoint(config.existed_ckpt)\n for param in network.trainable_params():\n weights_name = param.name\n if weights_name not in weights:\n raise ValueError(f\"Param {weights_name} is not found in ckpt file.\")\n\n if isinstance(weights[weights_name], Parameter):\n param.set_data(weights[weights_name].data)\n elif isinstance(weights[weights_name], Tensor):\n param.set_data(Tensor(weights[weights_name].asnumpy(), config.dtype))\n elif isinstance(weights[weights_name], np.ndarray):\n param.set_data(Tensor(weights[weights_name], config.dtype))\n else:\n param.set_data(weights[weights_name])\n else:\n for param in network.trainable_params():\n name = param.name\n value = param.data\n if isinstance(value, Tensor):\n if name.endswith(\".gamma\"):\n param.set_data(one_weight(value.asnumpy().shape))\n elif name.endswith(\".beta\") or name.endswith(\".bias\"):\n if param.data.dtype == \"Float32\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float16)))\n else:\n if param.data.dtype == \"Float32\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float16)))", "def load_weights(self, filename):\n checkpoint = torch.load(filename)\n if not checkpoint['input_size'] == self.state_size:\n print(f\"Error when loading weights from checkpoint {filename}: input size {checkpoint['input_size']} doesn't match state size of agent {self.state_size}\")\n return None\n if not checkpoint['output_size'] == self.action_size:\n print(f\"Error when loading weights from checkpoint {filename}: output size {checkpoint['output_size']} doesn't match action space size of agent {self.action_size}\")\n return None\n my_actor_hidden_layers = [each.out_features for each in self.actor_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['actor_hidden_layers'] == my_actor_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: actor hidden layers {checkpoint['actor_hidden_layers']} don't match agent's actor hidden layers {my_actor_hidden_layers}\")\n return None\n 
my_critic_hidden_layers = [each.out_features for each in self.critic_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['critic_hidden_layers'] == my_critic_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: critic hidden layers {checkpoint['critic_hidden_layers']} don't match agent's critic hidden layers {my_critic_hidden_layers}\")\n return None\n self.actor_local.load_state_dict(checkpoint['actor_state_dict'])\n self.critic_local.load_state_dict(checkpoint['critic_state_dict'])", "def load_weights(self, filepath):\n self.model.load_weights(filepath)", "def load_weights(self, weight_file):\r\n self.model.load_weights(weight_file)", "def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if \"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))", "def load_checkpoint(checkpoint_path):\n flat_checkpoint_dict = flatten_checkpoint(\n parse_checkpoint(checkpoint_path), keep_empty_nodes=True)\n return flat_checkpoint_dict", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def load_initial_weights(self, sess, weights_path, SKIP_LAYER):\r\n # Load the weights into memory\r\n weights_dict = np.load(weights_path, encoding='bytes').item()\r\n\r\n # list of all assignment operators\r\n # Loop over all layer names stored in the weights dict\r\n for op_name in weights_dict:\r\n\r\n # Check if layer should be trained from scratch\r\n if op_name not in SKIP_LAYER:\r\n\r\n with tf.variable_scope('model/source/' + op_name, reuse=True):\r\n\r\n # Assign weights/biases to their corresponding tf variable\r\n for data in weights_dict[op_name]:\r\n\r\n # Biases\r\n if len(data.shape) == 1:\r\n var = tf.get_variable('biases', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))\r\n\r\n # Weights\r\n else:\r\n var = tf.get_variable('weights', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load_checkpoint(checkpoint, model, 
optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_weights(self, file):\n self.model.load_weights(file)\n return", "def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")", "def load_weights(self):\n\n reader = pywrap_tensorflow.NewCheckpointReader(self._tf_model_prefix)\n var_to_shape_map = reader.get_variable_to_shape_map()\n data = dict()\n for name in var_to_shape_map:\n tensor = reader.get_tensor(name)\n data[name] = tensor\n\n print (\"Tensorflow checkpoint file [%s] loaded successfully. [%d] variables loaded.\"\n % (self._tf_model_prefix, len(data)))\n return data", "def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def load_model_weights(self, filename):\n self.model.load_weights(filename)", "def load_weights(model, fpath):\n state = torch.load(fpath)\n model.load_state_dict(state['state_dict'])", "def load_tf_weights_in_bert(model, tf_checkpoint_path):\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n print(\"Loading a TensorFlow models in PyTorch, 
requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load(self, filename):\n self.model.load_weights(filename)", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def load_ckpt(model,\n weight_path,\n **kargs):\n #model.set_state_dict(state_dict)\n\n if not osp.isfile(weight_path):\n raise IOError(f'{weight_path} is not a checkpoint file')\n #state_dicts = load(weight_path)\n\n logger = get_logger(\"paddlevideo\")\n state_dicts = paddle.load(weight_path)\n if \"VisionTransformer\" in str(model): # For TimeSformer case\n tmp = pretrain_vit_param_trans(model, state_dicts, kargs['num_patches'], kargs['seg_num'], kargs['attention_type'])\n else:\n tmp = {}\n total_len = len(model.state_dict())\n with tqdm(total=total_len, position=1, bar_format='{desc}', desc=\"Loading weights\") as desc:\n for item in tqdm(model.state_dict(), total=total_len, position=0):\n name = item\n desc.set_description('Loading %s' % name)\n if name not in state_dicts: # Convert from non-parallel model\n if str('backbone.' + name) in state_dicts:\n tmp[name] = state_dicts['backbone.' 
+ name]\n else: # Convert from parallel model\n tmp[name] = state_dicts[name]\n time.sleep(0.01)\n ret_str = \"loading {:<20d} weights completed.\".format(len(model.state_dict()))\n desc.set_description(ret_str)\n model.set_state_dict(tmp)", "def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))", "def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 'model!')", "def load_tf_weights_in_t5(model, config, tf_checkpoint_path):\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n tf_weights[name] = array\n\n for txt_name in names:\n name = txt_name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n if \"_slot_\" in name[-1]:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n pointer = model\n array = tf_weights[txt_name]\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n # elif scope_names[0] == 'scale':\n # pointer = getattr(pointer, 'weight')\n # elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':\n # pointer = getattr(pointer, 'bias')\n # elif scope_names[0] == 'squad':\n # pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if scope_names[0] not in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n if scope_names[0] != \"embedding\":\n logger.info(\"Transposing numpy weight of shape {} for {}\".format(array.shape, name))\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array.astype(np.float32))\n tf_weights.pop(txt_name, None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(\", \".join(tf_weights.keys())))\n # 
logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n return model", "def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(self, checkpoint_path, silent = False):\n ckc = CheckpointCache(checkpoint_path)\n\n if not self.built:\n dymmy_inputs = np.array([[0,1,2]])\n self([dymmy_inputs])\n \n symbolic_weights = self.trainable_weights + self.non_trainable_weights\n \n variable_keys = [self._clean_weight_name(symbolic_weight.name) for symbolic_weight in symbolic_weights]\n variable_keys = [self._convert_variable_name(key) for key in variable_keys]\n\n unloaded_keys = set(ckc.keys()) - set(variable_keys)\n if not silent:\n print('unused keys:', unloaded_keys)\n \n values = [ckc.get_values(key) for key in variable_keys]\n \n name_value_pair = []\n\n for weight, value in zip(symbolic_weights, values):\n if weight.shape != value.shape:\n raise ValueError(f'The shape of {weight.name} is {weight.shape} but shape from checkpoint is {value.shape}.')\n if weight.dtype != value.dtype:\n raise ValueError(f'The type of {weight.name} is {weight.dtype} but type from checkpoint is {value.dtype}.')\n \n name_value_pair.append((weight, value))\n \n K.batch_set_value(name_value_pair)\n \n return unloaded_keys", "def load_network(self, sess, filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += '/Models/'\n dir_path += filename\n saver = tf.train.Saver()\n saver.restore(sess, dir_path)", "def load_checkpoint(filename: str) -> CheckpointData:\n return torch.load(filename)", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def load_graph(filename):\n with tf.gfile.GFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])", "def load_weights_file(self, file_path):\n\n # Load the weights\n self._cnn_model.load_weights(file_path)", "def load_checkpoint(filename, from_gpu=True):\r\n assert os.path.exists(filename)\r\n if from_gpu:\r\n return torch.load(filename)\r\n else:\r\n return torch.load(filename, map_location=lambda storage, loc: storage)", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_weights(self, the_path):\n self.model.load_state_dict(torch.load(the_path))", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def load(self, filename):\n\n c = torch.load(filename)\n\n if type(c) is dict:\n sd = c['state_dict']\n self.net.load_state_dict(sd)\n if 'monitors' in c: # Remove the branching eventually\n self.monitors = c['monitors']\n else:\n self.monitors = {'loss_train': c['train_monitor'], 'loss_val': c['val_monitor'],\n 'accu_train': MetricHistory(), 'accu_val': MetricHistory()}\n if 'optimizer' in c: # Remove the branching eventually\n self.optimizer.load_state_dict(c['optimizer'])\n else:\n raise RuntimeError('Unsupported checkpoint. 
(Not a dict)')\n\n self.parent = filename\n self.last_checkpoint = filename\n self.start_epoch = self.monitors['loss_train'].num_epochs", "def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)", "def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_initial_weights(self, session, SKIP_LAYER=[]):\n if SKIP_LAYER:\n self.SKIP_LAYER = SKIP_LAYER\n \n layer_params = np.load(self.WEIGHTS_PATH, encoding = \"latin1\").item()\n \n # Loop over all layer names stored in the weights dict\n for op_name in layer_params:\n # Check if the layer is one of the layers that should be reinitialized\n if op_name not in self.SKIP_LAYER:\n with tf.variable_scope(op_name, reuse = True):\n # Loop over list of weights/biases and assign them to their corresponding tf variable\n print(\"load layer params:%s\" % op_name)\n for key in layer_params[op_name]:\n data = layer_params[op_name][key]\n # Biases\n if len(data.shape) == 1:\n var = tf.get_variable('biases', trainable = False)\n session.run(var.assign(data))\n # Weights\n else:\n var = tf.get_variable('weights', trainable = False)\n session.run(var.assign(data))", "def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] 
Failed to find a checkpoint, Exception!\")\n return False, 0", "def _load_weights(self):\n self.npz_weights = np.load(self._weight_file)\n self._load_byte_embedding()\n self._load_cnn_weights()\n self._load_highway()\n self._load_projection()", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def _load(checkpoint_path):\n state_dict, optimizer_state = dg.load_persistables(dirname=checkpoint_path)\n return state_dict, optimizer_state", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def load_weights_infer(checkpoint_path, model):\n try:\n # catalyst weights\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")[\"model_state_dict\"]\n except:\n # anything else\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(state_dict, strict=True)\n except:\n # for clf + seg for seg only prediction\n print(f\"Non-strict loading of weights from {checkpoint_path}\")\n model.load_state_dict(state_dict, strict=False)\n model.eval()\n return model", "def load_graph(filename):\n\twith tf.gfile.FastGFile(filename, 'rb') as f:\n\t\tgraph_def = tf.GraphDef()\n\t\tgraph_def.ParseFromString(f.read())\n\t\ttf.import_graph_def(graph_def, name='')", "def load_pretrained_net_weights(net, ckpt_path):\n print(\"Loading Model: \", ckpt_path)\n print('')\n\n net.load_weights(ckpt_path).expect_partial()", "def load_model(sess, meta_file, checkpoint_file):\n saver = tf.train.import_meta_graph(meta_file)\n saver.restore(sess, checkpoint_file)\n \n configs = tf.get_collection('configs')\n pvars = tf.get_collection('placeholders')\n \n model_settings = dict()\n for c in configs:\n name = c.name.split(':')[0]\n model_settings[name] = sess.run(c)\n \n model_vars = dict()\n for p in pvars:\n name = p.name.split(':')[0]\n model_vars[name] = p\n model_vars['probs'] = tf.get_collection('probs')[0]\n \n return model_settings, model_vars", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def load_pretrained_weights(model, weight_path):\n checkpoint = load_checkpoint(weight_path)\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n\n model_dict = model.state_dict()\n new_state_dict = OrderedDict()\n matched_layers, discarded_layers = [], []\n\n for k, v in state_dict.items():\n if k.startswith('module.'):\n k = k[7:] # discard module.\n\n if k in model_dict and model_dict[k].size() == v.size():\n new_state_dict[k] = v\n matched_layers.append(k)\n else:\n discarded_layers.append(k)\n\n 
model_dict.update(new_state_dict)\n model.load_state_dict(model_dict)\n\n if len(matched_layers) == 0:\n warnings.warn(\n 'The pretrained weights \"{}\" cannot be loaded, '\n 'please check the key names manually '\n '(** ignored and continue **)'.format(weight_path)\n )\n #else:\n #print(\n # 'Successfully loaded pretrained weights from \"{}\"'.\n # format(weight_path)\n #)\n #if len(discarded_layers) > 0:\n # print(\n # '** The following layers are discarded '\n # 'due to unmatched keys or layer size: {}'.\n # format(discarded_layers)\n # )", "def _load_local_weights(self, h5file):\n for name, layer in self._layers_to_save.items():\n self._load_layer_weights(layer, name, h5file)", "def load_weights(self, file_path):\n self.model.load_weights(file_path + '/policy_network.h5')\n print(\"\\nrestored weights of the policy network.\\n\")", "def load_checkpoint(path: str, use_cuda: bool = True) -> dict:\n assert os.path.isfile(path), \"Checkpoint %s not found\" % path\n checkpoint = torch.load(path, map_location=\"cuda\" if use_cuda else \"cpu\")\n return checkpoint", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def load(self, path, nr_of_saves, test_it=-1):\n with self.graph.as_default():\n print(\"Loading networks...\")\n checkpoint_dir = os.path.join(os.environ['APPROXIMATOR_HOME'], path, \"network-\"+str(test_it))\n self.saver = tf.train.Saver(max_to_keep=nr_of_saves+1)\n try:\n self.saver.restore(self.sess, checkpoint_dir)\n print(\"Loaded: {}\".format(checkpoint_dir))\n except Exception:\n if test_it <= 0:\n # Initialize the variables\n self.sess.run(tf.global_variables_initializer())\n print(\"Failed! 
Initializing the network variables...\")\n else:\n raise", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load(self):\n checkpoint = torch.load(self.checkpoint_path,\n map_location=self.net.device)\n self.load_state_dict(checkpoint)\n del checkpoint", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def load_weights(self, weights):\n weight = np.load(weights)\n return weight", "def load_checkpoint(checkpoint_directory,\n session):\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # filter variables if needed.\n print(variables)\n saver_ob = tf.train.Saver(variables, max_to_keep=0)\n os.makedirs(checkpoint_directory, exist_ok=True)\n # verify if we don't have a checkpoint saved directly\n step = 0\n ckpt = tf.train.get_checkpoint_state(checkpoint_directory)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n model_checkpoint_path = ckpt.model_checkpoint_path\n saver_ob.restore(session, model_checkpoint_path)\n step = int(model_checkpoint_path.rsplit('-', 1)[1])\n print('Model loaded = ', step)\n return saver_ob, step", "def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n logger.info(f'load model from: {self.pretrained}')\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n pass\n else:\n raise TypeError('pretrained must be a str or None')", "def load(self):\r\n checkpoint = torch.load(self.checkpoint_path,\r\n map_location=self.device)\r\n self.load_state_dict(checkpoint)\r\n del checkpoint", "def load_checkpoint(checkpoint_file: pl.Path) -> Optional[Dict[str, Any]]:\n if checkpoint_file.exists():\n logger.info(f\"Loading checkpoint {checkpoint_file}.\")\n checkpoint = torch.load(str(checkpoint_file))\n logger.info(f\"Done loading checkpoint from epoch {checkpoint['epoch']}.\")\n else:\n logger.warning(f\"No {checkpoint_file} checkpoint file found. 
Starting normal.\")\n return checkpoint", "def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def load_weights(self, weights):\n\n i = 0\n for l in range(1, self.num_layers()):\n for n in range(self.get_layer(l).num_nodes):\n for w in range(len(self.get_node_with_layer(l, n).weights)):\n self.get_node_with_layer(l, n).weights[w] = weights[i]\n i += 1", "def restore(self, weights_file):\r\n\r\n self.model.load_weights(weights_file, by_name=True)", "def load_nn(self, filename):\n self.weights_and_biases = (np.load(filename, allow_pickle=True)).tolist()\n print('Weights and biases are loaded')", "def load_model(self, checkpoint_path):\n model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def load_weights(self):\n try:\n print('loading weights from {}'.format(self.cfg.class_model_dir))\n self.load_state_dict(torch.load(self.cfg.class_model_dir + self.class_model_name + '.pth'))\n except Exception as e:\n print(\"load weights exception: {}\".format(e))", "def load_model_states_from_checkpoint(model, filename, tag, from_gpu=True):\r\n assert os.path.exists(filename)\r\n if from_gpu:\r\n checkpoint = torch.load(filename)\r\n else:\r\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\r\n model.load_state_dict(checkpoint[tag])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load_model(self, file_name):\n\t\tself.model.load_weights(file_name)", "def init_weights(self, pretrained=None, strict=True):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=strict, logger=logger)\n elif pretrained is not None:\n raise TypeError(f'\"pretrained\" must be a str or None. 
But received {type(pretrained)}.')", "def load_weights(self, file_path, format=None, in_order=True, skip=False):\n _load_weights(self, file_path, format, in_order, skip)", "def load_checkpoint_ram(self, checkpoint, train=True):\n # -- For all tasks, create a corresponding head, otherwise the restoring would not work due to mismatching weights -- #\n self.mh_network.add_n_tasks_and_activate(self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'],\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'])\n \n # -- Set the network to the full MultiHead_Module network to restore everything -- #\n self.network = self.mh_network\n \n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().load_checkpoint_ram(checkpoint, train)\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model", "def resnet_init_from_checkpoint_fn(checkpoint):\n logging.info('Initializing model weights from %s', checkpoint)\n assignment_map = {}\n resnet_scope = _get_resnet_scope()\n for var in contrib_framework.get_variables(\n scope=resnet_scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):\n if 'dense' not in var.op.name:\n # Remove the parent scope prefix.\n name_in_ckpt = var.op.name.replace(resnet_scope, 'resnet_model/')\n assignment_map[name_in_ckpt] = var\n tf.train.init_from_checkpoint(checkpoint, assignment_map)" ]
[ "0.76077384", "0.74392045", "0.74392045", "0.7414661", "0.7232018", "0.71317524", "0.7071087", "0.70339084", "0.700918", "0.6977498", "0.6935631", "0.691333", "0.68927085", "0.68749905", "0.68339276", "0.6816416", "0.6816042", "0.67910594", "0.6769799", "0.6769098", "0.6727852", "0.6713089", "0.6706459", "0.6683782", "0.6658891", "0.66465485", "0.6617412", "0.6616446", "0.6595445", "0.6595445", "0.65860444", "0.6582845", "0.65760124", "0.6567407", "0.6567407", "0.655", "0.654494", "0.654308", "0.65425074", "0.65225554", "0.6519562", "0.6502056", "0.64976", "0.6494225", "0.648421", "0.6479", "0.6476018", "0.64715683", "0.64661586", "0.64661586", "0.64654607", "0.64533824", "0.6452823", "0.64477986", "0.64477986", "0.64337814", "0.64327794", "0.6423933", "0.6423651", "0.6417821", "0.63964593", "0.63902766", "0.6384771", "0.63653183", "0.63635445", "0.635375", "0.63508856", "0.6348675", "0.6344284", "0.6343296", "0.6342838", "0.63309", "0.6330147", "0.6329762", "0.63064724", "0.6300289", "0.62704086", "0.62672496", "0.62668973", "0.6265366", "0.62633765", "0.6259392", "0.6257295", "0.6255254", "0.62301844", "0.6229952", "0.6226355", "0.62140304", "0.62128186", "0.6211067", "0.61976856", "0.61976856", "0.61963856", "0.6194719", "0.6172211", "0.61630845", "0.61520475", "0.61514324" ]
0.77680707
1
Lists the model's parameters.
def get_params(self): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def getListOfParameters(self, *args):\n return _libsbml.Model_getListOfParameters(self, *args)", "def parameters(self):\n return self.model.parameters()", "def parameters(self):\n return []", "def print_model_params(model):\n for param, value in zip(model.param_names, model.parameters):\n print(\"{:0.4f}\\t{}\".format(value, param))", "def print_params(self):\n print(self._list_params())", "def _get_parameters(self) -> list:\n return self.parameters", "def get_params(self):\n return []", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def get_params(self):\n return list(self.params.values())", "def generate_parameter_list(self) -> None:\n\n # simulation parameters from model\n model_parameter_ids = np.array(self.amici_model.getParameterIds())\n write_string_array(self.f, \"/parameters/modelParameterNames\",\n model_parameter_ids)\n print(Fore.CYAN + \"Number of model parameters:\",\n len(model_parameter_ids))\n\n print(Fore.CYAN + \"Number of optimization parameters:\",\n len(self.parameter_df))\n write_string_array(self.f, \"/parameters/parameterNames\",\n self.parameter_df.index.values[\n (self.parameter_df.estimate == 1)\n & ~self.parameter_df.index.isin(\n self.amici_model.getFixedParameterIds())])\n\n self.generate_simulation_to_optimization_parameter_mapping()\n\n self.f.flush()", "def get_params_list():\n return common.QOL_PARAMS", "def get_resource_params():\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def model_parameters(self) -> Iterator[Tuple[str, torch.Tensor]]:\n return self._model.named_parameters()", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def param(self):\n return []", "def param(self):\n return []", "def print_param_names(model):\n for (param_name, param) in model.get_parameters().items():\n print(param_name, param.shape)", "def parameter_lists_for_model(self, model: AbstractPriorModel) -> List[float]:\n if self.is_path_kwargs:\n paths = model.all_paths\n else:\n paths = model.all_names\n\n return self.parameter_lists_for_paths(paths)", "def parameters(self):\n return self._params", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def get_model_params(self):\n return self._model_params", "def parameters(self) -> List[Parameter]:\n return self._parameters", "def param(self):\r\n\r\n return []", "def get_params(self):\n pass", "def parameters(self):\n return [term.parameter for term in self.terms]", "def parameters(self):\n return self.pars", "def parameters(self):\n return self._params", "def param(self):\r\n return []", "def help(cls):\n print(cls._LIST_PARAMETERS)", "def get_params(self):", "def parameters(self):\n return self.vars", "def get_params(self):\n raise NotImplementedError", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def list_param(command):\n namespace = app.main(command)\n assert namespace.command == 'lp' or namespace.command == \"listparam\"", "def get_params (self):\n return self.params", "def 
get_params (self):\n return self.params", "def parameters(self):\n return [o.parameters for o in self.obs]", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def show_params(self):\n \n return self.params[self.profile]", "def params(self):\n\t\treturn self.params_", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def parameters(self):\n pass", "def params(self):\n return self._pars", "def params(self):\n return self._params", "def params(self):\n return self._params", "def params(self):\n return self._params", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def params(self) -> List[ParamSpec]:\n return self._params", "def parameters(self):\n\n return self._parameters", "def get_params(self) -> torch.Tensor:\n params = []\n for pp in list(self.net.parameters()):\n params.append(pp.view(-1))\n return torch.cat(params)", "def get_params(self):\n\n return self.params_", "def params(self):\n params = []\n\n for item in self._definition.get('params', []):\n params.append(Parameter(**item))\n\n return params", "def get_model_params(self):\n\n results = self._model.fit()\n model_params = np.expand_dims(results.params.as_matrix(), 1)\n return model_params", "def generative_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in dir(self.generative_model):\n params.extend(list(self.generative_model.parameters()))\n params.extend(list(self.latent.generative_parameters()))\n return params", "def _list_params(self, the_list: List):\n return [p for e in the_list for p in self._params(e)]", "def printParameters(self):\n print(\"----------Model Parameters----------\")\n print(\"Initial Conv. 
Depth : \" + str(self.conv_depth))\n print(\"Number of Classes : \" + str(self.n_classes))\n print(\"Dropout : \" + str(self.dropout))\n print(\"Activation Function : Relu\")\n print(\"Input Shape : \" + str(self.input_shape))\n print(\"Batch Size : \" + str(self.batch_size))\n print(\"--------Optimizer Parameters--------\")\n print(\"Learning Rate : \" + str(self.optimizer.lr))\n print(\"Momentum : \" + str(self.optimizer.momentum))\n print(\"Initial Decay : \" + str(self.optimizer.initial_decay))", "def get(self):\n return self.params", "def print_params(self):\n s = self._list_params()+\"\\n\"\n if 'scale_params' in self.__dict__.keys():\n s += self.scale_params._list_params()+\"\\n\"\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n s += self.atmospheric_params._list_params()+\"\\n\"\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n s += self.atemperature_params._list_params()+\"\\n\"\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n s += self.oceanic_params._list_params()+\"\\n\"\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n s += self.ground_params._list_params()+\"\\n\"\n\n if 'gotemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n s += self.gotemperature_params._list_params() + \"\\n\"\n\n print(\"Qgs v0.2.8 parameters summary\")\n print(\"=============================\\n\")\n print(s)", "def get_params_iter(self):\n return []", "def get_embed_params(model) -> List:\r\n return [param for name, param in model.named_parameters() if \"embed\" in name]", "def parameters(self):\n return self.trainer_parameters", "def parameters(self):\n return self.trainer_parameters", "def prms(widget: QWidget) -> List:\n parameters = BaseTrain.prms(widget)\n return parameters", "def parameters(self):", "def params():\n raise NotImplementedError", "def get_param_names(self):\n return list(self.params.keys())", "def param_values(self):\n return self._param_values", "def get_paramnames_list(self):\n # TODO include syselem?\n\n query = \"SELECT NAME FROM %s\" % self.__schema\n with self.__connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n return [val['NAME'] for val in result]", "def get(self, *args):\n return _libsbml.ListOfParameters_get(self, *args)", "def params(self) -> Munch:\n return self._params", "def get_parameters(self):\n return self.context.params", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def parameters_names(cls):\n return cls._Parameters._fields", "def get_params(self):\n params = []\n params.append(('rows', str(self._rows)))\n if self._page > 1:\n params.append(('start', str((self._page - 1) * self._rows)))\n\n return params", "def parameters(self):\n return self._default_params", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def parameter_names(self) -> List[str]:", "def get_parList(self):\n parList = []\n for modelName in self._modelList:\n model = self.__modelDict[modelName]\n modelParDict = model.parFitDict\n for parName in modelParDict.keys():\n parList.append(modelParDict[parName][\"value\"])\n return parList" ]
[ "0.80538356", "0.75631976", "0.72960615", "0.7105871", "0.7048238", "0.6971301", "0.6963369", "0.6942364", "0.69122803", "0.6896806", "0.6824722", "0.6821565", "0.676841", "0.67298985", "0.67298985", "0.67248833", "0.6719116", "0.6717513", "0.6717513", "0.67164814", "0.6712457", "0.6694902", "0.66933364", "0.6691488", "0.66579175", "0.665548", "0.6649228", "0.66487366", "0.66479886", "0.66403294", "0.66145444", "0.6569835", "0.6568972", "0.65615726", "0.65540904", "0.6544046", "0.6527509", "0.6527509", "0.6527509", "0.6526537", "0.65207803", "0.65207803", "0.6512", "0.6503212", "0.6503212", "0.6503212", "0.6503212", "0.6503212", "0.6486162", "0.64849406", "0.64796937", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6457764", "0.64506465", "0.64479744", "0.6446706", "0.6440776", "0.6440776", "0.6440776", "0.6433494", "0.63789225", "0.6367834", "0.6361364", "0.632063", "0.6302258", "0.6289967", "0.6287379", "0.62705636", "0.6267131", "0.62598896", "0.62573093", "0.6242155", "0.6225191", "0.62155", "0.62155", "0.6212865", "0.62126464", "0.6211837", "0.61864233", "0.6184046", "0.6182091", "0.6181579", "0.61734796", "0.61494285", "0.61487633", "0.61447823", "0.61333835", "0.61279726", "0.6113043", "0.6107981", "0.61042255" ]
0.61706924
90
Exposes all the layers of the model.
def fprop(self, x): if x is self.x: return self.end_points else: with slim.arg_scope(arg_scopes_map['resnet_v2_152']()): net, end_points = networks_map['resnet_v2_152']( x, num_classes=self.num_classes, is_training=False, reuse=tf.AUTO_REUSE) return _get_updated_endpoints(end_points, 'resnet_v2_152/logits')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def layers(self): # -> LayerView:\n ...", "def build_layers(self):\n raise NotImplementedError", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def _get_layers(self) :\n \n return self._layers", "def add_bluprint_layers(self, models_dict, graph, class_names, dimensions,\n show_activation, show_constant_input):\n top_model = models_dict['top-model']\n # Get the top-model color list containing\n # the base color and the layer shades\n top_model_color = top_model[1]\n # get the model\n top_model = top_model[0]\n\n # Get the layers of the model\n layers = top_model[\"config\"][\"layers\"]\n # Loop through the layers\n for layer in layers:\n # If the layer is not a model\n if layer[\"class_name\"] != \"Model\":\n # Get the layer name\n layer_name = layer[\"name\"]\n # If label only layer's class name\n if class_names:\n # Get the layer's information\n layer_info = self.viz_utils.query_layer(layer_name,\n models_dict)\n # Get the layer's class name\n layer_class = layer_info['class_name']\n # If the layer is a a constant input layer,\n # manually specify the class name\n if layer_name.find('constant_input') != -1:\n layer_class = 'Constant Input'\n # Depending on the class name\n # find the the layer shade\n # If the layer is a constant_input layer\n # the color is black\n model_color = top_model_color[1].get(layer_class, \"black\")\n else:\n # If don't use class names for layers\n # then use the layer name from the JSON\n layer_class = layer_name\n model_color = top_model_color[0]\n\n\n # Add the node to the graph\n graph = self.viz_utils.add_nodes(layer_name, graph,\n layer_class, model_color,\n dimensions, show_constant_input)\n\n # Add Blueprint Inbound Edges\n graph = self.connect_blueprint_inbounds(models_dict,\n layer, graph,\n class_names, dimensions,\n show_activation, show_constant_input)\n else:\n # Add Softmod\n graph = self.connect_softmod_in_blueprint(models_dict,\n layer, graph, class_names,\n dimensions, show_activation, show_constant_input)\n\n return graph", "def get_all_layers(model):\n layers = []\n for l in model.layers:\n if hasattr(l, 'layers'):\n layers += get_all_layers(l)\n else:\n layers.append(l)\n return layers", "def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. 
call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return", "def layers(self):\n return self['layers']", "def layers(self) ->Optional[nn.ModuleList]:\n return self._layers", "def define_layers(self):\n\n if self.D0Flag:\n self.d = self.h\n\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n for l in range(1, self.L):\n self.layers.append(nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )) # nn.BatchNorm1d(self.h, affine=False)))\n\n self.layers.append(nn.Linear(self.h, 1))", "def layers(self, x):\n raise NotImplementedError", "def build(self):\n\n layers = GiraffeLayer.get_all_structural()\n \n for layer in layers:\n\n self.add_objects_from_layer(layer)\n\n return self", "def UpdateLayers(self):\n pass", "def run(layers):", "def getLayers(self):\n return self.__layers", "def make_feature_layers(self, config):\n raise NotImplementedError", "def layers(self, layers):\n\n self._layers = layers", "def layers(self):\r\n return self._flc.layers", "def print_layer_trainable(model_name):\n\n print('trainable : layer name')\n print('- '*30)\n for layer in model_name.layers:\n # if layer.trainable:\n print(\"{0}:\\t{1}\".format(layer.trainable, layer.name))\n \n return", "def print_layers(model):\r\n for i in range(len(model.layers)):\r\n print(\"Printing layer shape: %d\" % i, model.layers[i])\r\n weights = model.layers[i].get_weights()\r\n for weight in weights: # Layer type\r\n print(weight.shape)", "def get_trainable_layers(self):\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers", "def define_layers(self):\n if self.d != 0:\n # If we have a fixed input size we use it do define the first layer\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n else:\n self.layers = [nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )]\n\n l = 0\n for l in self.layers_sizes():\n self.layers.append(nn.Sequential(nn.Linear(self.h - l, self.h - l - self.delta_h),\n nn.ReLU(), )) # nn.BatchNorm1d( self.h - l - self.delta_h, affine=False)))\n self.layers.append(nn.Sequential(nn.Linear(self.h - l - self.delta_h, 1), nn.ReLU()))", "def __repr__(self):\n return misc.describe_layer(self, name=\"model\")", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def compile(self):\n for layer in self.layers:\n layer._Dense__load()", "def _init_layers(self) -> None:\n self.convs_all_levels = nn.ModuleList()\n for i in range(self.start_level, self.end_level + 1):\n convs_per_level = nn.Sequential()\n convs_per_level.add_module(\n f'conv{i}',\n ConvModule(\n self.in_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n inplace=False,\n bias=False))\n self.convs_all_levels.append(convs_per_level)\n\n conv_branch = []\n for _ in range(self.num_stacked_convs):\n conv_branch.append(\n ConvModule(\n self.feat_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=False))\n self.conv_branch = nn.Sequential(*conv_branch)\n\n self.conv_pred = nn.Conv2d(\n self.feat_channels, self.out_channels, 1, stride=1)", "def 
add_layers(self, layers):\n\n existing_layers = self.layers\n assert len(existing_layers) > 0\n for layer in layers:\n assert layer.get_mlp() is None\n layer.set_mlp(self)\n layer.set_input_space(existing_layers[-1].get_output_space())\n existing_layers.append(layer)\n assert layer.layer_name not in self.layer_names\n self.layer_names.add(layer.layer_name)", "def inception_model(layer_names):\n \n### START CODE HERE ###\n # Load InceptionV3 with the imagenet weights and **without** the fully-connected layer at the top of the network\n inception = tf.keras.applications.inception_v3.InceptionV3(include_top=False,weights='imagenet')\n\n # Freeze the weights of the model's layers (make them not trainable)\n inception.trainable = False\n \n # Create a list of layer objects that are specified by layer_names\n output_layers = [inception.get_layer(name).output for name in layer_names]\n\n # Create the model that outputs the content and style layers\n model = tf.keras.Model(inputs=inception.input, outputs=output_layers)\n \n # return the model\n return model", "def get_layers(model):\n layers = []\n for child in model.children():\n layer_name = child.__class__.__name__\n if layer_name in CONV_OPS:\n layers.append(Layer.from_conv(child))\n elif layer_name in UP_OPS:\n layers.append(Layer.from_up(child))\n else:\n layers.extend(get_layers(child))\n return layers", "def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)", "def __init__(self, layers):\n\n\t\tself.layers = layers", "def get_all(self):\n\n layer_names = rs.LayerNames()\n\n layers = []\n\n for layer_name in layer_names:\n\n layer = GiraffeLayer(layer_name)\n \n layers.append(layer)\n\n return layers", "def GetLayers(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_GetLayers(self, *args)", "def model_number_layers(model):\n for idx, layer in enumerate(model.layers):\n print(idx, layer.name)", "def layers(self, layers):\n self._layers = layers\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.retina_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n 3,\n padding=1)\n self.retina_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4, 3, padding=1)", "def setup_layer_structure(self):\n self.page_rank_convolution_1 = self.layer(self.feature_number, self.args.layers[0], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_2 = self.layer(self.args.layers[0], self.args.layers[1], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_3 = self.layer(self.args.layers[1], self.class_number, self.args.iterations, self.args.alpha)", "def loadLayers(self,\n layers: List[keras.layers.Layer]) -> None:\n self.__layers = layers\n\n # Model has been reset, redraw view\n self.modelReset.emit()\n return", "def update_layers(self):\n\n # Para cada layer 
atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def _init_layers(self) -> None:\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])", "def addExportLayerToCoreml(builder):\n outputNames = [output.name for output in builder.spec.description.output]\n\n for i, outputName in enumerate(outputNames):\n # formulas: https://github.com/ultralytics/yolov5/issues/471\n builder.add_activation(\n name=f\"sigmoid_{outputName}\",\n non_linearity=\"SIGMOID\",\n input_name=outputName,\n output_name=f\"{outputName}_sigmoid\",\n )\n\n ### Coordinates calculation ###\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2) -> nC = 640 / strides[i]\n builder.add_slice(\n name=f\"slice_coordinates_xy_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_xy\",\n axis=\"width\",\n start_index=0,\n end_index=2,\n )\n # x,y * 2\n builder.add_elementwise(\n name=f\"multiply_xy_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_xy\"],\n output_name=f\"{outputName}_multiplied_xy_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # x,y * 2 - 0.5\n builder.add_elementwise(\n name=f\"subtract_0_5_from_xy_{outputName}\",\n input_names=[f\"{outputName}_multiplied_xy_by_two\"],\n output_name=f\"{outputName}_subtracted_0_5_from_xy\",\n mode=\"ADD\",\n alpha=-0.5,\n )\n grid = make_grid(featureMapDimensions[i], featureMapDimensions[i]).numpy()\n # x,y * 2 - 0.5 + grid[i]\n builder.add_bias(\n name=f\"add_grid_from_xy_{outputName}\",\n input_name=f\"{outputName}_subtracted_0_5_from_xy\",\n output_name=f\"{outputName}_added_grid_xy\",\n b=grid,\n shape_bias=grid.shape,\n )\n # (x,y * 2 - 0.5 + grid[i]) * stride[i]\n builder.add_elementwise(\n name=f\"multiply_xy_by_stride_{outputName}\",\n input_names=[f\"{outputName}_added_grid_xy\"],\n output_name=f\"{outputName}_calculated_xy\",\n mode=\"MULTIPLY\",\n alpha=strides[i],\n )\n\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2)\n builder.add_slice(\n name=f\"slice_coordinates_wh_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_wh\",\n axis=\"width\",\n start_index=2,\n end_index=4,\n )\n # w,h * 2\n builder.add_elementwise(\n name=f\"multiply_wh_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_wh\"],\n output_name=f\"{outputName}_multiplied_wh_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # (w,h * 2) ** 2\n builder.add_unary(\n name=f\"power_wh_{outputName}\",\n 
input_name=f\"{outputName}_multiplied_wh_by_two\",\n output_name=f\"{outputName}_power_wh\",\n mode=\"power\",\n alpha=2,\n )\n # (w,h * 2) ** 2 * anchor_grid[i]\n anchor = (\n anchorGrid[i]\n .expand(-1, featureMapDimensions[i], featureMapDimensions[i], -1)\n .numpy()\n )\n builder.add_load_constant_nd(\n name=f\"anchors_{outputName}\",\n output_name=f\"{outputName}_anchors\",\n constant_value=anchor,\n shape=anchor.shape,\n )\n builder.add_elementwise(\n name=f\"multiply_wh_with_achors_{outputName}\",\n input_names=[f\"{outputName}_power_wh\", f\"{outputName}_anchors\"],\n output_name=f\"{outputName}_calculated_wh\",\n mode=\"MULTIPLY\",\n )\n\n builder.add_concat_nd(\n name=f\"concat_coordinates_{outputName}\",\n input_names=[f\"{outputName}_calculated_xy\", f\"{outputName}_calculated_wh\"],\n output_name=f\"{outputName}_raw_coordinates\",\n axis=-1,\n )\n builder.add_scale(\n name=f\"normalize_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_coordinates\",\n output_name=f\"{outputName}_raw_normalized_coordinates\",\n W=torch.tensor([1 / 640]).numpy(),\n b=0,\n has_bias=False,\n )\n\n ### Confidence calculation ###\n builder.add_slice(\n name=f\"slice_object_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_object_confidence\",\n axis=\"width\",\n start_index=4,\n end_index=5,\n )\n builder.add_slice(\n name=f\"slice_label_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_label_confidence\",\n axis=\"width\",\n start_index=5,\n end_index=0,\n )\n # confidence = object_confidence * label_confidence\n builder.add_multiply_broadcastable(\n name=f\"multiply_object_label_confidence_{outputName}\",\n input_names=[\n f\"{outputName}_label_confidence\",\n f\"{outputName}_object_confidence\",\n ],\n output_name=f\"{outputName}_raw_confidence\",\n )\n\n # input: (1, 3, nC, nC, 85), output: (3 * nc^2, 85)\n builder.add_flatten_to_2d(\n name=f\"flatten_confidence_{outputName}\",\n input_name=f\"{outputName}_raw_confidence\",\n output_name=f\"{outputName}_flatten_raw_confidence\",\n axis=-1,\n )\n builder.add_flatten_to_2d(\n name=f\"flatten_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_normalized_coordinates\",\n output_name=f\"{outputName}_flatten_raw_coordinates\",\n axis=-1,\n )\n\n builder.add_concat_nd(\n name=\"concat_confidence\",\n input_names=[\n f\"{outputName}_flatten_raw_confidence\" for outputName in outputNames\n ],\n output_name=\"raw_confidence\",\n axis=-2,\n )\n builder.add_concat_nd(\n name=\"concat_coordinates\",\n input_names=[\n f\"{outputName}_flatten_raw_coordinates\" for outputName in outputNames\n ],\n output_name=\"raw_coordinates\",\n axis=-2,\n )\n\n builder.set_output(\n output_names=[\"raw_confidence\", \"raw_coordinates\"],\n output_dims=[(25200, numberOfClassLabels), (25200, 4)],\n )", "def predict_visualize_layers(self, X):\n\n if isinstance(X, np.ndarray):\n X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1))\n elif isinstance(X, tf.data.Dataset):\n X = X.map(self._reshape)\n\n for x, y in X:\n x = tf.expand_dims(x, 0)\n\n plt.title(\"Test Sample Input\")\n plt.grid(False)\n plt.imshow(x[0, :, :, 0], aspect='auto', cmap='plasma', origin='lower')\n plt.colorbar()\n plt.show()\n\n layer_outputs = [layer.output for layer in self.model.layers]\n visualisation_model = tf.keras.models.Model(inputs=self.model.input, outputs=layer_outputs)\n\n visualisations = visualisation_model.predict(x)\n\n images_per_row = 4\n\n for layer_name, 
layer_activation in zip(map(lambda x : x.name, layer_outputs[:3]), visualisations[:3]):\n n_features = layer_activation.shape[-1]\n size = layer_activation.shape[1:3]\n n_cols = n_features // images_per_row\n grid = np.zeros((size[0] * n_cols, images_per_row * size[1]))\n\n for col in range(n_cols):\n for row in range(images_per_row):\n channel_image = layer_activation[0, :, :, col * images_per_row + row]\n channel_image -= channel_image.mean()\n channel_image /= channel_image.std()\n channel_image *= 64\n channel_image += 128\n channel_image = np.clip(channel_image, 0, 255).astype('uint8')\n grid[col * size[0]: (col + 1) * size[0], row * size[1]: (row + 1) * size[1]] = channel_image\n\n plt.figure(figsize=(1. / size[0] * grid.shape[1], 3. / size[1] * grid.shape[0]))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(grid, aspect='auto', cmap='plasma', origin='lower')\n plt.colorbar()\n plt.show()\n\n pred = np.argmax(visualisations[-1])\n print(f\"Predicted class: {Genre(pred)} with probability {visualisations[-1][0][pred]}\\n\"\n + f\"Actual class: {Genre(y)}\")", "def vgg_layers(layer_names):\n # Load our model. Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n vgg.trainable = False\n \n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model", "def transformer_layers(self):\n return self._transformer_layers", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)", "def hidden_layers(self):\n\t\tif self.hidden is None:\n\t\t\tself.hidden, self.inputs, self.weights_all, self.biases_all = [], [], [], []\n\t\t\tlast_hidden = self.x\n\t\t\tif self.covnet == 1:\n\t\t\t\ty_conv, self._drouput, self.hidden, self.inputs = deepnn(self.x)\n\t\t\telif self.covnet == 2:\n\t\t\t\ty_c, self.hidden, self.inputs = multi_layer_perceptron(self.x, self.input_size, self.num_of_classes,\n\t\t\t\t self.layerSize[0], self.layerSize[1])\n\t\t\telse:\n\n\t\t\t\tself._drouput = 'dr'\n\t\t\t\t# self.hidden.append(self.x)\n\t\t\t\tfor i in range(1, len(self.all_layer_sizes)):\n\t\t\t\t\tname_scope = 'hidden' + str(i - 1)\n\t\t\t\t\trow_size, col_size = self.all_layer_sizes[i - 1], self.all_layer_sizes[i]\n\t\t\t\t\tactivation_function = self.activation_function\n\t\t\t\t\tlast_hidden = self.initilizae_layer(name_scope, row_size, col_size, activation_function,\n\t\t\t\t\t last_hidden)\n\t\t\t\tname_scope = 'final_layer'\n\t\t\t\trow_size, col_size = self.layerSize[-1], self.num_of_classes\n\t\t\t\tactivation_function = None\n\t\t\t\tlast_hidden = self.initilizae_layer(name_scope, row_size, col_size, activation_function, last_hidden)\n\t\treturn self.hidden", "def convert_layers(model):\n\n import logging\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n for name, module in model._modules.items():\n if len(list(module.children())) > 0:\n model._modules[name] = convert_layers(model=module)\n try:\n module_str = str(module)\n module_new = eval(module_str)\n try:\n module_new.weight = module.weight\n module_new.bias = module.bias\n except:\n pass\n model._modules[name] = module_new\n logger.info(\"Quantizing \" + str(name) + \" \" + 
str(module))\n except:\n pass\n return model", "def keras_add_layers(model, num_classes, keep_prob):\n # DONE: Implement function\n\n # See also lesson \"FCN-8 Decoder\" for structure, and Long_Shelhamer paper\n\n # Walkthrough video started with 1x1 convolution like this, but notes explained\n # that was already done for us (loaded model is not ordinary VGG but already\n # adapted for FCN). In fact the VGG network provided looks very much like\n # the one generated by the Single-Shot Detector caffe code, so I guess they\n # share some common heritage.\n #conv_1x1 = tf.layers.conv2d(vgg_layer7_out, # at/near end of VGG\n # num_classes, # just road/nonroad for us\n # 1, # as 1x1 conv\n # padding='same',\n # kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3))\n\n # Using Tensorboard to visualise the structure of the Udacity VGG model provided, and\n # tf.trainable_variables() to list the dimensions and sizes of the weights and biases\n # for each layer, I arrive at this summary of what shape the output of each layer\n # is (knowing that we started with a 160 height x 576 width x 3 colour channel image).\n # All of the convolution layers have SAME padding and [1,1,1,1] strides so they\n # don't reduce the x-y pixel size. All the pooling layers have [1,2,2,1] strides so\n # they halve the pixel size. I'm ignoring the first dimension (across images), as\n # everything works on one image at a time.\n #\n # Layer name Details Output dimensions\n # <input> raw image 160x576x3\n # conv1_1 conv2d 3x3x3x64, Relu 160x576x64\n # conv1_2 conv2d 3x3x64x64, Relu 160x576x64\n # pool1 pool [1,2,2,1] 80x288x64\n # conv2_1 conv2d 3x3x64x128, Relu 80x288x128\n # conv2_2 conv2d 3x3x128x128, Relu 80x288x128\n # pool2 pool [1,2,2,1] 40x144x128\n # conv3_1 conv2d 3x3x128x256, Relu 40x144x256\n # conv3_2 conv2d 3x3x256x256, Relu 40x144x256\n # conv3_3 conv2d 3x3x256x256, Relu 40x144x256\n # pool3 pool [1,2,2,1] 20x72x256 --> layer3_out\n # conv4_1 conv2d 3x3x256x512, Relu 20x72x512\n # conv4_2 conv2d 3x3x512x512, Relu 20x72x512\n # conv4_3 conv2d 3x3x512x512, Relu 20x72x512\n # pool4 pool [1,2,2,1] 10x36x512 --> layer4_out\n # conv5_1 conv2d 3x3x512x512, Relu 10x36x512\n # conv5_2 conv2d 3x3x512x512, Relu 10x36x512\n # conv5_3 conv2d 3x3x512x512, Relu 10x36x512\n # pool5 pool [1,2,2,1] 5x18x512\n # fc6 conv2d 7x7x512x4096, Relu 5x18x4096\n # dropout dropout(keep_prob) 5x18x4096\n # fc7 conv2d 1x1x4096x4096, Relu 5x18x4096\n # dropout_1 dropout(keep_prob) 5x18x4096 --> layer7_out\n # layer8 conv2d_t 10x36\n\n layer3_out = model.get_layer('block3_pool').output\n layer4_out = model.get_layer('block4_pool').output\n\n # Problem here: TF2 library model doesn't have image-shaped layers 6 & 7 like\n # model provided originally with TF1, but instead is flattened amporphous classifier.\n # So we're working with more 'raw' layer as input. 
TODO should add back\n # two conv2d layers before this to be like the original\n drop_prob = 1.0 - keep_prob\n\n layer5 = model.get_layer('block5_pool')\n\n layer6_conv = tf.keras.layers.Conv2D(4096,\n 7, # 7x7 patch from original Udacity model\n strides=(1,1),\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)), # guess same as others\n name='layer6_conv')\n\n layer6_dropout = tf.keras.layers.Dropout(drop_prob, name=\"layer6_dropout\")\n\n layer7_conv = tf.keras.layers.Conv2D(4096,\n 1, # 1x1 patch from original Udacity model\n strides=(1,1),\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)), # guess\n name='layer7_conv')\n\n layer7_dropout = tf.keras.layers.Dropout(drop_prob, name=\"layer7_dropout\")\n\n # Connect up the new layers\n x = layer6_conv(layer5.output)\n x = layer6_dropout(x)\n x = layer7_conv(x)\n layer7 = layer7_dropout(x)\n\n # Create a new model\n mod_model = tf.keras.Model(inputs=model.input, outputs=layer7)\n\n # We should now have the same structure as the original Udacity version of VGG16,\n # but still need to add the decoder and skip connections as before\n\n # Upsample by 2. We need to work our way down from a kernel depth of 4096\n # to just our number of classes (i.e. 2). Should we do this all in one go?\n # Or keep more depth in as we work upwards? For now doing it all in one hit.\n layer8 = tf.keras.layers.Conv2DTranspose(num_classes, #filters, \n 4, # kernel size taken from classroom example, might experiment\n strides=2, # stride causes upsampling\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer8')\n\n # Now we're at 10x36x2 so we have same pixel resolution as layer4_out. Can't directly add\n # in layer4_out because it has filter depth of 512. (Though we could have had our transpose\n # convolution only downsample to 512 for compatibility... might try that later)\n\n # Squash layer4 output with 1x1 convolution so that it has compatible filter depth (i.e. 
num_classes)\n layer4_squashed = tf.keras.layers.Conv2D(num_classes, # new number of filters,\n 1, # 1x1 convolution so kernel size 1\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer4_squashed')\n # upsample by 2\n layer9 = tf.keras.layers.Conv2DTranspose(num_classes, # filters\n 4, # kernel size taken from classroom example\n strides=(2,2), # stride causes upsampling\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer9')\n\n # Now we're at 20x72x2 so same pixel resolution as layer3_out, but need to squash that from\n # 256 filters to 2 (num_classes) before we can add it in as skip connection\n layer3_squashed = tf.keras.layers.Conv2D(num_classes, # new number of filters\n 1, # 1x1 convolution so kernel size 1\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer3_squashed')\n\n # upsample by 8 to get back to original image size\n layer10 = tf.keras.layers.Conv2DTranspose(num_classes,\n 32, # Finding quite large kernel works nicely\n strides=(8,8), # stride causes upsampling\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer10')\n\n # so now we should be at 160x576x2, same as original image size, 2 classes\n\n # Connect the layers\n x1 = layer8(layer7)\n x2 = layer4_squashed(layer4_out)\n\n # now we can add skip layer of this dimension taken from corresponding encoder layer\n layer8_plus_layer4 = tf.keras.layers.add([x1, x2], name='layer8_plus_layer4')\n #layer8_plus_layer4 = tf.add(layer8, layer4_squashed, name='layer8_plus_layer4')\n\n x1 = layer9(layer8_plus_layer4)\n x2 = layer3_squashed(layer3_out)\n\n # now we can add skip layer of this dimension taken from corresponding encoder layer\n layer9_plus_layer3 = tf.keras.layers.add([x1, x2], name='layer9_plus_layer3')\n #layer9_plus_layer3 = tf.add(layer9, layer3_squashed, name='layer9_plus_layer3')\n\n predictors = layer10(layer9_plus_layer3) # layer 10 should be same size as image\n\n # Create a new model\n mod_model = tf.keras.Model(inputs=model.input, outputs=predictors)\n print(\"Model after adding decoder layers:\")\n mod_model.summary()\n\n return mod_model", "def iteration_layers(model, speedup, session, indepth_layer=None):\n if speedup is True:\n layer_names_reduced = ['conv2d1',\n 'conv2d2',\n 'mixed3b',\n 'mixed4b',\n 'mixed5b']\n layer_tensors = [session.graph.get_tensor_by_name(name + \":0\") for name in layer_names_reduced]\n else:\n layer_tensors = model.layer_tensors\n\n return layer_tensors", "def layers(self, layers):\n self._layers = layers\n self.thetas = []\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer\n self.thetas.extend(layer.thetas())", "def viewOnFlatLayer(layer, dimensions, name = None):\n assert max(dimensions) > 1, \"At least one dimension needs to be larger than one.\"\n def slicer():\n nbunits = reduce(lambda x, y: x*y, dimensions, 1)\n insize = layer.indim // nbunits\n outsize = layer.outdim // nbunits\n for index in range(nbunits):\n yield ModuleSlice(layer, insize*index, insize*(index+1), outsize*index, outsize*(index+1))\n c = slicer()\n return ModuleMesh(lambda: next(c), dimensions, name)", "def num_layers(self): # -> int:\n ...", "def build_model(self):\n self.model = Sequential()\n # print self.layers[0].identifier\n # print self.layers[0].parameters\n for layer in self.layers:\n # print layer.identifier\n # print layer.parameters\n 
self.model.add(layer.toKerasFn())\n\n\n # super(SequentialModelWrapper, self).compile(optimizer=self.optimizer.toKerasFn(),\n # loss=self.loss,\n # metrics=self.metrics)\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics)", "def __call__(cls, *args, **kwargs):\n layer = super(LayerAspect, cls).__call__(*args, **kwargs)\n\n if Job.Current:\n Job.Current.addLayer(layer)\n \n layer.afterInit()\n return layer", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(2)\n ]\n self.norms = ModuleList(norms_list)", "def build_modules(self):\n self.backbone = Backbone(\n self.configs['backbone'],\n freeze_backbone=self.configs['freeze_backbone'],\n freeze_batchnorm=True\n )\n\n backbone_channel_sizes = get_backbone_channel_sizes(self.backbone)\n\n self.fpn = FeaturePyramidNetwork(\n backbone_channel_sizes=backbone_channel_sizes,\n min_feature_level=self.configs['min_feature_level'],\n max_feature_level=self.configs['max_feature_level'],\n feature_size=self.configs['pyramid_feature_size']\n )\n\n self.shared_conv_model = SharedConvModel(\n input_feature_size=self.configs['pyramid_feature_size'],\n feature_size=self.configs['shared_conv_feature_size'],\n num_layers=self.configs['shared_conv_num_layers']\n )\n\n if self.configs['shared_conv_num_layers'] > 0:\n shared_conv_output_size = self.configs['shared_conv_feature_size']\n else:\n shared_conv_output_size = self.configs['pyramid_feature_size']\n\n self.ofn = ObjectFinderNetwork(\n input_feature_size=shared_conv_output_size,\n feature_size=self.configs['finder_feature_size'],\n num_layers=self.configs['finder_num_layers']\n )\n\n self.ofn_loss_fn\n\n # self.classification_model = ClassificationModel()\n #\n # self.regression_model = RegressionModel()", "def __init__(self, model):\n self.output_weights = model.get_layer(\"output\").get_weights()[0]\n self.cam_model = Model(inputs=model.input, outputs=(model.get_layer(\"activation\").output, model.get_layer(\"output\").output))", "def layers(self):\n\n if not self.last_node:\n return []\n return nuke.layers(self.last_node)", "def summary(self):\n for i,layer in enumerate(self.chain):\n x = Input([2])\n y = layer.forward(x)\n Model(x,y,name=f'layer_{i}_summary').summary()", "def handle_layers(context, model, toplayer, layerids, materials, update, import_hidden=False):\n #setup main container to hold all layer collections\n layer_col_id=\"Layers\"\n if not layer_col_id in context.blend_data.collections:\n layer_col = context.blend_data.collections.new(name=layer_col_id)\n try:\n toplayer.children.link(layer_col)\n except Exception:\n pass\n else:\n #If \"Layers\" collection is in place, we assume the plugin had imported 3dm before\n layer_col = context.blend_data.collections[layer_col_id]\n\n # build lookup table for LayerTable index\n # from GUID, create collection for each\n # layer\n for lid, l in enumerate(model.Layers):\n if not l.Visible and not import_hidden:\n continue\n lcol = utils.get_iddata(context.blend_data.collections, l.Id, l.Name, None)\n layerids[str(l.Id)] = (lid, lcol)\n utils.tag_data(layerids[str(l.Id)][1], l.Id, l.Name)\n '''\n matname = l.Name + \"+\" + str(l.Id)\n if matname not in materials:\n laymat = utils.get_iddata(context.blend_data.materials, l.Id, l.Name, None)\n if update:\n\t laymat.use_nodes = True\n\t r, g, b, _ = 
l.Color\n\t principled = PrincipledBSDFWrapper(laymat, is_readonly=False)\n\t principled.base_color = (r/255.0, g/255.0, b/255.0)\n materials[matname] = laymat\n '''\n # second pass so we can link layers to each other\n for l in model.Layers:\n # link up layers to their parent layers\n if str(l.ParentLayerId) in layerids:\n parentlayer = layerids[str(l.ParentLayerId)][1]\n try:\n parentlayer.children.link(layerids[str(l.Id)][1])\n except Exception:\n pass\n # or to the top collection if no parent layer was found\n else:\n try:\n layer_col.children.link(layerids[str(l.Id)][1])\n except Exception:\n pass", "def mini_model(self):\n with tf.variable_scope(name_or_scope='human2d_network'):\n # down-sampling\n resi_0 = res_layer(self._input, filters=16, strides=2, kernel_size=7, training=self.training, name='resi_0')\n resi_1 = res_layer(resi_0, filters=32, strides=1, kernel_size=3, training=self.training, name='resi_1')\n pool_0 = max_pool_layer(resi_1, name='pool_0')\n resi_2 = res_layer(pool_0, filters=32, strides=1, kernel_size=3, training=self.training, name='resi_2')\n # hourglass module\n resi_3 = res_layer(resi_2, filters=64, strides=1, kernel_size=3, training=self.training, name='resi_3')\n hrgs_0 = hourglass_layer(resi_3, training=True, name='hrgs_0')\n # keypoint output\n keypoint_pre_0 = res_layer(hrgs_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_0')\n keypoint_pre_1 = res_layer(keypoint_pre_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_1')\n keypoint_pre_2 = res_layer(keypoint_pre_1, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_2')\n keypoint_output_raw = res_layer(keypoint_pre_2, filters=14, strides=1, kernel_size=1,\n training=self.training, bottleneck=False, name='keypoint_output_raw')\n keypoint_output = tf.nn.sigmoid(x=keypoint_output_raw, name='keypoint_output')\n # silhouette output\n silhouette_pre_0 = res_layer(hrgs_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_0')\n silhouette_pre_1 = res_layer(silhouette_pre_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_1')\n silhouette_pre_2 = res_layer(silhouette_pre_1, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_2')\n silhouette_output_raw = res_layer(silhouette_pre_2, filters=2, strides=1, kernel_size=1,\n training=self.training, bottleneck=False, name='silhouette_output_raw')\n silhouette_output = tf.nn.softmax(logits=silhouette_output_raw, name='silhouette_output')\n # return\n return None, None, keypoint_output, silhouette_output", "def get_all_structural(self):\n\n layer_names = rs.LayerNames()\n\n layers = []\n\n for layer_name in layer_names:\n\n layer = GiraffeLayer(layer_name)\n \n if layer.is_structural():\n\n layers.append(layer)\n\n # sort layers to make sure numbered nodes are added first and to maintain regular order\n layers.sort(key = lambda x: x.to_int())\n\n return layers", "def visualize_conv_layers(self, layer_name='conv1', savefig_path=\"\"):\n\n # The name of the layer we want to visualize\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == layer_name][0]\n\n # Visualize all filters in this layer.\n filters = np.arange(get_num_filters(self.model.layers[layer_idx]))\n\n # Generate input image for each filter. 
Here `text` field is used to overlay `filter_value` on top of the image.\n vis_images = []\n for idx in filters:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n\n # Generate stitched image palette with 8 cols.\n stitched = utils.stitch_images(vis_images, cols=8)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(layer_name)\n plt.savefig(savefig_path)\n\n print('debug')", "def vgg_layers(layer_names):\n # Load our model. Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n plot_model(vgg, 'vgg19_diagram.png')\n vgg.trainable = False\n\n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model", "def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()", "def print_layer_io_shapes(model):\n for i, _ in enumerate(model.layers):\n print(\"layer {} input: \".format(i), model.layers[i].input_shape)\n print(\"layer {} output:\".format(i), model.layers[i].output_shape)", "def getVisibilityLayers(self):\n return self._VisibilityLayers", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def visualize_model(self, ax):\n ax.imshow(self.w[1:].reshape(28, -1, order='F').T, cmap='bone')", "def __call__(self):\n custom_obj = {'tf': tf, 'relu6': tf.nn.relu6}\n wfile = self._get_model_weights()\n model = tf.keras.models.load_model(wfile, custom_objects=custom_obj)\n\n if not self._trainable:\n # freeze encoder layers up to\n # expanded_conv_16_project_BN\n for layer in model.layers[1:147]:\n layer.trainable = False\n\n return model", "def unfreeeze_all_layers(self):\n # Unfreeeze\n logger.info('MODEL: Unfreeze all layers.')\n for i in range(len(self.model.layers)):\n self.model.layers[i].trainable = True\n \n # Compile model\n logger.info('MODEL: Compiling...')\n self.model.compile(optimizer = Adam(lr=1e-4),\n loss={'yolo_loss': lambda y_true, y_pred: y_pred})", "def list_layers(service):\n r = _post(service)\n if 'layers' in r:\n return [layer(p) for p in r['layers']]\n return", "def _set_freeze_layers(self):\n for layer in self.encoder.layers[:self.freeze_layers]:\n layer.trainable = False", "def ApplyInputs(ss, en):\n ss.Net.InitExt()\n\n lays = [\"Input\", \"Output\"]\n for lnm in lays :\n ly = leabra.Layer(ss.Net.LayerByName(lnm))\n pats = en.State(ly.Nm)\n if pats != 0:\n ly.ApplyExt(pats)", "def filesystem_layers(self):\n pass", "def summary(self, verbose=False):\n for i, layer in enumerate(self._layers):\n print('%d: %s' % (i, str(layer)))\n if verbose:\n print('weights:', layer.get_weights())\n if layer._use_bias:\n print('bias:', layer._bias)\n print()", "def initialize_layers(self, layers_config: dict, inputs=None):\n layers_config = layers_config.copy()\n input_lyrs = []\n initiated_layers = OrderedDict()\n wrp_layer = None # indicator for wrapper layers\n first_layer = True\n\n for lyr, lyr_args in layers_config.items():\n\n lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)\n\n lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)\n\n if K.BACKEND == 'pytorch':\n\n if first_layer:\n first_layer = False\n\n if callable(lyr_config):\n 
lyr_initiated = lyr_config\n else:\n lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)\n setattr(self, lyr, lyr_initiated)\n initiated_layers[lyr] = {\"layer\": lyr_initiated, \"named_outs\": named_outs, 'call_args': call_args,\n 'inputs': lyr_inputs}\n\n else:\n # may be user has defined layers without input layer, in this case add Input layer as first layer\n if first_layer:\n if inputs is not None: # This method was called by providing it inputs.\n assert isinstance(inputs, tf.Tensor)\n # since inputs have been defined, all the layers that will be added will be next to first layer\n first_layer = False\n layer_outputs = inputs\n initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}\n\n elif lyr_name != \"Input\":\n if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer\n initialized_layer = LAYERS[\"Input\"](shape=lyr_config['input_shape'])\n else:\n # for simple dense layer based models, lookback will not be used\n def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)\n initialized_layer = LAYERS[\"Input\"](shape=def_shape)\n\n # first layer is built so next iterations will not be for first layer\n first_layer = False\n # put the first layer in memory to be used for model compilation\n # add th layer which the user had specified as first layer\n initiated_layers[initialized_layer.name] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n\n # The inputs to the layer have not been specified, so either it is an Input layer\n if lyr_inputs is None:\n # or it uses the previous outputs as inputs\n if lyr_name == \"Input\":\n # it is an Input layer, hence should not be called\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n else:\n # it is executable and uses previous outputs as inputs\n if lyr_name in ACTIVATION_LAYERS:\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n # lyr_config is serialized lambda layer, which needs to be deserialized\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n # layers_config['lambda']['config'] still contails lambda, so we need to replace the python\n # object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n if lyr_name == \"TemporalFusionTransformer\":\n lyr_config['return_attention_components'] = True\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n 
initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n else: # The inputs to this layer have been specified so they must exist in lyr_cache.\n # it is an executable\n if lyr_name in ACTIVATION_LAYERS:\n\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n layer_initialized = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': layer_initialized,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n if activation is not None: # put the string back to dictionary to be saved in config file\n lyr_config['activation'] = activation\n\n first_layer = False\n\n self.jsonize_lyr_config(lyr_config)\n\n # inputs = [] todo, indentify input layers\n # for k,v in lyr_cache.items():\n # since the model is not build yet and we have access to only output tensors of each list, this is probably\n # # the only way to know that how many `Input` layers were encountered during the run of this method. 
Each\n # tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if k.upper() != \"TIMEDISTRIBUTED\" and hasattr(v, 'op'):\n # if hasattr(v.op, 'inputs'):\n # _ins = v.op.inputs\n # if len(_ins) == 0:\n # inputs.append(v)\n # else: # not sure if this is the proper way of checking if a layer receives an input or not!\n # if hasattr(v, '_keras_mask'):\n # inputs.append(v)\n\n setattr(self, 'initiated_layers', initiated_layers)\n setattr(self, 'input_lyrs', input_lyrs)\n\n\n # todo,\n # # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use\n # # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if len(layer_outputs.op.inputs) < 1:\n # print(\"Warning: the output is of Input tensor class type\")\n # else:\n # if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node\n # print(\"Warning: the output is of Input tensor class type\")\n\n # outs = None\n #if BACKEND == 'tensorflow':\n # outs = self.call(input_lyrs)\n # setattr(self, 'output_lyrs', outs)\n # if BACKEND == 'tensorflow':\n # ## Reinitial\n # super(Model, self).__init__(\n # inputs=input_lyrs,\n # outputs=outs)\n #MODEL.__init__(self, inputs=inputs, outputs=outs)\n\n return input_lyrs # , outs", "def __init__(self,inputSize,outputSize, *args, **kwds):\n #currently the code is only for 2 hidden layers, apart from in and out\n self._saveFile = kwds.get('saveFile')\n self._inputSize = inputSize\n self._outputSize= outputSize\n self._layer1 = keras.layers.Dense(128,activation='relu')\n self._layer2 = keras.layers.Dense(64,activation='relu') \n self._layer3 = keras.layers.Dense(128,activation='relu')\n self._piLayer = keras.layers.Dense(self._outputSize-1,activation='softmax')\n self._zLayer = keras.layers.Dense(1,activation='tanh')\n self._inputs = keras.Input(shape=(self._inputSize,)) #returns placeholder\n x = self._layer1(self._inputs)\n x = self._layer2(x)\n x = self._layer3(x)\n self._outPi = self._piLayer(x)\n self._outZ = self._zLayer(x)\n self._output = keras.layers.concatenate([self._outPi,self._outZ],axis = -1)\n self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n# self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n self._model.compile(optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.99, beta_2=0.999, epsilon=1e-10, decay=0.0001),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n self._epochSize = 256", "def _build(self):\n with tf.variable_scope (self.name + '_architecutre') as scope:\n images_square = unflatten_layer ( self.images )\n visualize_images(images_square)\n\n # Conv Layer 1\n conv1_out, params = conv_2d_layer ( input = images_square,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'enc_conv_1',\n visualize = True )\n process_params(params, name = self.name)\n e1_params = params\n pool1_out = max_pool_2d_layer ( input = conv1_out, name = 'enc_pool_1')\n # lrn1_out = local_response_normalization_layer (pool1_out, name = 'lrn_1' )\n\n # Conv Layer 2\n conv2_out, params = conv_2d_layer ( input = pool1_out,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 'enc_conv_2' )\n process_params(params, name = self.name)\n e2_params = params\n pool2_out = max_pool_2d_layer ( input = conv2_out, 
name = 'enc_pool_2')\n # lrn2_out = local_response_normalization_layer (pool2_out, name = 'lrn_2' )\n\n flattened = flatten_layer(pool2_out)\n\n # Dropout Layer 1 \n flattened_dropout = dropout_layer ( input = flattened,\n prob = self.dropout_prob,\n name = 'enc_dropout_1') \n\n # Dot Product Layer 1\n fc1_out, params = dot_product_layer ( input = flattened_dropout,\n neurons = HIDDEN_1,\n name = 'enc_dot_1')\n process_params(params, name = self.name)\n e3_params = params \n\n # Dropout Layer 2 \n fc1_out_dropout = dropout_layer ( input = fc1_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_2')\n # Dot Product Layer 2\n fc2_out, params = dot_product_layer ( input = fc1_out_dropout, \n neurons = HIDDEN_2,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n e4_params = params \n\n # Dropout Layer 3 \n fc2_out_dropout = dropout_layer ( input = fc2_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_3')\n \n # Dot Product Layer 2\n self.codeword, params = dot_product_layer ( input = fc2_out_dropout, \n neurons = CODEWORD_LENGTH,\n activation = CODE_ACTIVATION,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n process_codeword_normalization_regularizer(self.codeword, \n coeff = AUTOENCODER_CODEWORD_COEFF,\n name = self.name)\n e5_params = params \n # tf.summary.histogram('codewords', self.codeword)\n # self.hash = threshold_layer ( input = self.codeword,\n # name = 'hash')\n # process_hash_regularizer(self.codeword, coeff = AUTOENCODER_HASH_COEFF,\n # name = self.name)\n\n # Decoder ... \n decoder_1_out, params = dot_product_layer ( input = self.codeword, \n neurons = HIDDEN_2,\n params = [tf.transpose(e5_params[0]), None],\n name = 'decoder_dot_1')\n d1_params = params\n process_params([params[1]], name = self.name)\n \n dec_1_out_dropout = dropout_layer ( input = decoder_1_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_1')\n\n decoder_2_out, params = dot_product_layer ( input = dec_1_out_dropout, \n neurons = HIDDEN_1,\n params = [tf.transpose(e4_params[0]), None],\n name = 'decoder_dot_2')\n d2_params = params\n process_params([params[1]], name = self.name)\n \n # dropout 2\n dec_2_out_dropout = dropout_layer ( input = decoder_2_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_2')\n\n decoder_3_out, params = dot_product_layer ( input = dec_2_out_dropout, \n neurons = 1250,\n params = [tf.transpose(e3_params[0]), None],\n name = 'decoder_dot_3')\n d3_params = params\n process_params([params[1]], name = self.name)\n\n # DeConv Layer 1\n # The output shapes need to be changed according to architecture.\n\n dec_3_square = unflatten_layer ( decoder_3_out, channels = CONV_2_N )\n upsample_1 = upsampling_layer (dec_3_square, size = (10,10), name = 'dec_upsampling_1')\n\n deconv1_out, params = deconv_2d_layer ( input = upsample_1,\n neurons = CONV_1_N,\n filter_size = CONV_2_FILT,\n output_shape = (12,12),\n # n_outs = MINI_BATCH_SIZE,\n stride = (1,1,1,1), \n params = [e2_params[0], None], \n name = 'dec_deconv_1' )\n\n process_params([params[1]], name = self.name)\n d4_params = params\n\n # DeConv Layer 2\n upsample_2 = upsampling_layer (deconv1_out, size = (24,24), name = 'dec_upsampling_2')\n decoded_images_square, params = deconv_2d_layer ( input = upsample_2,\n neurons = 1,\n filter_size = CONV_1_FILT,\n stride = (1,1,1,1),\n output_shape = (28,28),\n # n_outs = MINI_BATCH_SIZE, \n params = [e1_params[0], None], \n activation = 'tanh', \n name = 'dec_deconv_2' )\n \n process_params([params[1]], name = self.name)\n d5_params = params \n \n 
self.decoded = flatten_layer (decoded_images_square, in_shp = [-1, 28, 28, 1])\n visualize_images(decoded_images_square, name = 'decoded')\n # This is because transpose don't initialize.\n self.params = [ [e5_params[0], d1_params[1] ],\n [e4_params[0], d2_params[1] ],\n [e3_params[0], d3_params[1] ],\n [e2_params[0], d4_params[1] ],\n [e1_params[0], d5_params[1] ] ]\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + '_decoder_error') as scope:\n reconstruction_error = rmse(self.images, self.decoded) \n tf.add_to_collection( self.name + '_objectives', reconstruction_error ) \n tf.summary.scalar('reconstruction_error', reconstruction_error)\n\n self._cook_optimizer( \n lr = AUTOENCODER_LR, \n optimizer = AUTOENCODER_OPTIMIZER,\n l1_coeff = AUTOENCODER_L1_COEFF,\n l2_coeff = AUTOENCODER_WEIGHT_DECAY_COEFF)", "def common_layers_with_encoder(self):\n return [\n self.self_attention, self.self_attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_layer_norm\n ]", "def neural_net(self, layers):\n model = nn.Sequential()\n for l in range(0, len(layers) - 1):\n model.add_module(\"layer_\"+str(l), nn.Linear(layers[l],layers[l+1], bias=True))\n if l != len(layers) - 2:\n model.add_module(\"tanh_\"+str(l), nn.Tanh())\n\n return model", "def add_layer(self, freeze = True, add = True):\n if add:\n self.num_layers += 1\n if self.conv_dim == 1:\n new_cnn = layers.Conv1D(self.n_filters,\n (self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0], self.n_filters),\n padding=\"same\",\n name='cnn_1d_{}'.format(self.num_layers-1),\n kernel_initializer = initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n elif self.conv_dim == 2:\n new_cnn = layers.Conv2D(self.n_filters,\n (self.n_kernels, self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n padding=\"same\",\n name='cnn_2d_{}'.format(self.num_layers-1),\n kernel_initializer=initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n self.list_cnn.append(new_cnn)\n\n if freeze:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = False\n else:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = True", "def _get_layers(self):\n from keras.engine.topology import InputLayer\n\n layer_names = [layer.name for layer in self._model.layers[:-1] if not isinstance(layer, InputLayer)]\n logger.info('Inferred %i hidden layers on Keras classifier.', len(layer_names))\n\n return layer_names", "def vgg_layers(layer_names):\n # Load our model. 
Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n vgg.trainable = False\n\n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model", "def __init__(self): #initializing\n super(Model, self).__init__()\n self.linear = torch.nn.Linear(3, 1) # one input/feature , one output\n # here where other NN layers are added", "def vis_layer(model, layer, channel):\n num_channels = dla_lucid.LAYERS[layer][1]\n all_vis = []\n for i in range(num_channels):\n if channel is True:\n vis = vis_channel(model, layer, i)\n else:\n vis = vis_neuron(model, layer, i)\n all_vis.append(vis)\n\n all_vis_array = np.array(all_vis)\n return all_vis_array", "def setup_to_transfer_learn(model):\n for layer in model.layers:\n layer.trainable = False\n\n #model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])", "def _init_layers(self) -> None:\n super()._init_layers()\n self.controller = nn.Conv2d(\n self.feat_channels, self.num_params, 3, padding=1)", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. 
for _ in range(len(self.prior_generator.strides))\n ]", "def _init_layers(self) -> None:\n weight_nums, bias_nums = [], []\n for i in range(self.num_layers):\n if i == 0:\n weight_nums.append((self.in_channels + 2) * self.feat_channels)\n bias_nums.append(self.feat_channels)\n elif i == self.num_layers - 1:\n weight_nums.append(self.feat_channels * 1)\n bias_nums.append(1)\n else:\n weight_nums.append(self.feat_channels * self.feat_channels)\n bias_nums.append(self.feat_channels)\n\n self.weight_nums = weight_nums\n self.bias_nums = bias_nums\n self.num_params = sum(weight_nums) + sum(bias_nums)", "def LayerAddflatten(bottom_model, num_classes):\n top_model = bottom_model.output\n top_model = Flatten(name = \"flatten\")(top_model)\n top_model = Dense(526, activation = \"relu\")(top_model)\n top_model = Dense(263, activation = \"relu\")(top_model)\n top_model = Dense(num_classes, activation = \"sigmoid\")(top_model)\n return top_model", "def _export_model(self):\n graph = ComputeGraph.from_onnx(self.onnx_model.graph)\n\n print(\"Running constant propagation\")\n constant_states = constant_propagation(graph)\n\n self._remove_constants(graph, constant_states)\n self._remove_nops(graph, constant_states)\n\n # Add shape information from constant propagation:\n for var, res in constant_states.items():\n if var in graph.shape_dict:\n shape = graph.shape_dict[var]\n if res.shape != shape:\n print(\"Warning: Shapes do not match: \", var, res.shape, shape)\n if res.shape is not None:\n print(\"Replacing shape {} with {}\".format(shape, res.shape))\n graph.shape_dict[var] = res.shape\n elif res.shape is not None:\n graph.shape_dict[var] = res.shape\n\n print(\"Inference graph:\")\n for node in graph.nodes:\n inputs = node.inputs\n input_shapes = (str(graph.shape_dict[i]) for i in node.inputs if i in graph.shape_dict)\n outputs = node.outputs\n output_shapes = (str(graph.shape_dict[o]) for o in node.outputs if o in graph.shape_dict)\n print(\"{:<24} {:<20} {:<30} {:<30} {:<20} {:<30}\".format(node.name,\n node.op_type,\n \",\".join(inputs),\n \",\".join(input_shapes),\n \",\".join(outputs),\n \",\".join(output_shapes)))\n\n memory_manager = MemoryManager()\n\n self._generate_weights_file(graph)\n\n self.dummy_input = generate_dummy_main(graph)\n\n self.reference_input = generate_reference_main(graph)\n\n self._generate_network_initialization(graph, memory_manager)\n\n self._generate_network_cleanup(graph, memory_manager)\n\n implementations = self._select_implementations(graph, memory_manager)\n schedule = self._get_schedule(graph, implementations)\n # self._print_live_ranges(schedule)\n\n input_names = [\"input_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.inputs]\n output_names = [\"output_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.outputs]\n\n \"\"\"Currently we only allow single input (no batch processing) to the CNN, but this may be multi-channel input\"\"\"\n inputs = graph.inputs\n if len(inputs) > 1:\n print(\"ERROR: Multiple inputs not supported!\")\n exit(1)\n else:\n input_shape = graph.shape_dict[inputs[0].name]\n print(\"Input shape: {}\".format(input_shape))\n\n if len(input_shape) == 4:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 3:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not 
supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 2:\n print(\"Input is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n outputs = graph.outputs\n if len(outputs) > 1:\n print(\"ERROR: Multiple outputs not supported\")\n exit(1)\n else:\n output_shape = graph.shape_dict[outputs[0].name]\n print(\"Output shape: {}\".format(output_shape))\n\n if len(output_shape) == 2:\n print(\"Output is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n output_defs = [\"pico_cnn::naive::Tensor *\" + n for n in output_names]\n elif len(output_shape) == 3:\n print(\"ERROR: Unknown output shape of network: {}\".format(output_shape))\n exit(1)\n elif len(output_shape) == 4:\n print(\"ERROR: Multi-dimensional output is currently not supported.\")\n exit(1)\n\n network_def = \"void Network::run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n network_def_header = \"void run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n\n layer_declaration_code = \"\"\n layer_allocation_code = \"\"\n layer_execution_code = \"\"\n layer_deletion_code = \"\"\n\n \"\"\"Iterate over all tasks in the schedule, put some debug info in the code and the pico-cnn implementation.\"\"\"\n for task in schedule:\n num, node, impl = task\n layer_allocation_code += \" //Layer \" + str(num) + \" \" + node.name + \" \" + node.op_type + \"\\n\"\n layer_allocation_code += \" //Attributes\\n\"\n for key, val in node.attrs.items():\n layer_allocation_code += \" // \" + str(key) + \": \" + str(val) + \"\\n\"\n layer_allocation_code += \" //Parameters\\n\"\n layer_allocation_code += \" //Inputs: \" + \",\".join(node.inputs) + \"\\n\"\n layer_allocation_code += \" //Outputs: \" + \",\".join(node.outputs) + \"\\n\"\n layer_allocation_code += \" //Shape:\\n\"\n for i in node.inputs:\n layer_allocation_code += \" // {}: {}\\n\".format(i, graph.get_shape(i))\n for o in node.outputs:\n layer_allocation_code += \" // {}: {}\\n\".format(o, graph.get_shape(o))\n\n if impl:\n layer_declaration_code += impl.generate_declaration()\n layer_declaration_code += \"\\n\"\n\n layer_allocation_code += impl.generate_allocation()\n layer_allocation_code += \"\\n\"\n\n layer_execution_code += impl.generate_execution()\n layer_execution_code += \"\\n\"\n\n layer_deletion_code += impl.generate_deletion()\n layer_deletion_code += \"\\n\"\n\n else:\n print(\"ERROR: Unsupported layer: {}! 
Aborting code generation.\".format(node.op_type))\n return 1\n\n self.constructor_code += layer_allocation_code + \"\\n\"\n self.destructor_code += layer_deletion_code + \"\\n\"\n\n # # TODO: What does this loop do?\n # for id, buffer in memory_manager.buffers.items():\n # if graph.is_tensor(id):\n # continue\n # if graph.is_input(id):\n # continue\n # if graph.is_output(id):\n # continue\n\n network_code: Text = \"#include \\\"network.h\\\"\\n\\n\"\n network_code += \"Network::Network() {\\n\\n\"\n network_code += self.constructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += \"Network::~Network() {\\n\"\n network_code += self.destructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += network_def+\"{\\n\"\n network_code += layer_execution_code\n\n network_code += \"}\\n\\n\"\n\n network_header = \"#ifndef NETWORK_H\\n\"\n network_header += \"#define NETWORK_H\\n\\n\"\n network_header += \"#include \\\"pico-cnn/pico-cnn.h\\\"\\n\\n\"\n network_header += \"class Network {\\n\"\n network_header += \"public:\\n\"\n network_header += \"Network();\\n\"\n network_header += \"~Network();\\n\"\n network_header += network_def_header + \"; \\n\\n\"\n network_header += self.buffer_declaration + \"\\n\"\n network_header += layer_declaration_code\n network_header += \"};\\n\"\n network_header += \"#endif //NETWORK_H\\n\"\n\n self.network_code = network_code\n self.network_header = network_header\n\n \"\"\"\n Create Makefile containing a target for the generated dummy input and a network specific one.\n The code for the network specific input has to be written manually.\n \"\"\"\n # TODO: Does this need to be more sophisticated?\n self.makefile = \"CC = g++\\n\"\n self.makefile += \"CFLAGS = -std=c++11 -Wall -O2 -march=native -DINFO\\n\"\n self.makefile += \"LDFLAGS = -L../../../pico-cnn\\n\"\n self.makefile += \"LD_LIBS = -lpico-cnn -lm\\n\\n\"\n self.makefile += \"# list of all generated .cpp files.\\n\"\n self.makefile += \"NETWORK_LIST = network.cpp\"\n self.makefile += \"\\n\\ndummy_input: dummy_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) dummy_input.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) $(LDFLAGS) $(LD_LIBS) -o dummy_input\"\n self.makefile += \"\\n\\nreference_input: reference_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) reference_input.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o reference_input\"\n self.makefile += \"\\n\\n{}: {}.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\".format(self.model_name, self.model_name)\n self.makefile += \"$(CC) {}.cpp $(NETWORK_LIST) -I../../.. 
$(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o {}\".format(self.model_name, self.model_name)\n self.makefile += \"\\n\\nall: dummy_input reference_input {}\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: clean\\n\"\n self.makefile += \"clean:\\n\\trm -rf {} dummy_input reference_input\\n\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: libpico-cnn.a\\n\"\n self.makefile += \"libpico-cnn.a:\\n\\t$(MAKE) -C ../../../pico-cnn\"\n\n self.save(\"./generated_code/{}\".format(self.model_name))", "def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)", "def __call__(self, inputs, **kwargs):\n # Actually call the layer (optionally building it).\n output = super(Layer, self).__call__(inputs, **kwargs)\n if context.in_eager_mode():\n return output\n\n # Un-built subclassed network: build it\n if isinstance(self, Network) and not self.inputs:\n self._set_inputs(inputs, training=kwargs.get('training'))\n\n # Update learning phase info.\n output_tensors = to_list(output)\n uses_lp = any(\n [getattr(x, '_uses_learning_phase', False) for x in to_list(inputs)])\n uses_lp = getattr(self, 'uses_learning_phase', False) or uses_lp\n for i in range(len(output_tensors)):\n output_tensors[i]._uses_learning_phase = getattr(\n output_tensors[i], '_uses_learning_phase', False) or uses_lp\n\n # Optionally load weight values that were specified at layer instantiation.\n if hasattr(self, '_initial_weights') and self._initial_weights is not None:\n self.set_weights(self._initial_weights)\n del self._initial_weights\n return output", "def export_layers(self, dest, show):\n doc = copy.deepcopy(self.document)\n for layer in doc.xpath('//svg:g[@inkscape:groupmode=\"layer\"]', namespaces=inkex.NSS):\n layer.attrib['style'] = 'display:none'\n id = layer.attrib[\"id\"]\n if id in show:\n layer.attrib['style'] = 'display:inline'\n\n doc.write(dest)", "def common_layers(self):\n return [\n self.attention_layer, self.attention_output_dense,\n self.attention_dropout, self.attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_dropout,\n self.output_layer_norm\n ]", "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)", "def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n CustomTransformerBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n float_type=self.float_type,\n name=(\"layer_%d\" % i)))\n super(CustomTransformer, self).build(unused_input_shapes)", "def create_feature_layers(self):\n feature_columns = [tf.feature_column.numeric_column(name,\n normalizer_fn=lambda x: (x - self.train_features[\n name].mean()) /\n self.train_features[name].std())\n for name in self.feature_names]\n\n self.feature_layers = layers.DenseFeatures(feature_columns)\n return 'feature layers had been created'", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this 
layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf" ]
[ "0.72325057", "0.6624318", "0.62856334", "0.62295145", "0.6214809", "0.6171816", "0.60817003", "0.5990596", "0.59697384", "0.5955498", "0.5902771", "0.58940166", "0.5883603", "0.5861259", "0.5861006", "0.5847972", "0.5845203", "0.5828398", "0.5788822", "0.5782679", "0.5767099", "0.57425326", "0.57222074", "0.5712829", "0.5712829", "0.56764674", "0.56520236", "0.56172395", "0.5594013", "0.55928683", "0.5580619", "0.5579592", "0.55595297", "0.5550327", "0.55495214", "0.5527533", "0.5517842", "0.55077225", "0.5502721", "0.55026466", "0.549963", "0.5492102", "0.5485171", "0.54708755", "0.54690754", "0.5463765", "0.5462446", "0.5460833", "0.5456318", "0.5452711", "0.5444138", "0.5443301", "0.5439382", "0.54338735", "0.5421428", "0.5419587", "0.5408111", "0.54061496", "0.5398926", "0.5398483", "0.53930515", "0.53733885", "0.53709096", "0.53703576", "0.5361642", "0.5359667", "0.53589463", "0.5354624", "0.53545886", "0.535422", "0.53375673", "0.5334994", "0.53269535", "0.53221035", "0.5317047", "0.53027296", "0.52973855", "0.52901715", "0.5275039", "0.5274979", "0.52737033", "0.5266431", "0.5261835", "0.52605796", "0.52600664", "0.52496713", "0.52330184", "0.5231018", "0.52298784", "0.5227895", "0.522487", "0.52173674", "0.52170604", "0.5214551", "0.52106416", "0.5199109", "0.5191151", "0.518876", "0.5188223", "0.5186258", "0.5180307" ]
0.0
-1
Initializes the tensorflow graph for the ResNet50v2 model.
def __init__(self, x, num_classes=15, is_training=False):
    super(mobilenet_v2, self).__init__()
    self.x = x
    self.num_classes = num_classes
    # populating the tensorflow graph
    with slim.arg_scope(arg_scopes_map['mobilenet_v2']()):
        net, end_points = networks_map['mobilenet_v2'](
            x,
            num_classes=num_classes,
            is_training=is_training,
            reuse=None)
    self.end_points = _get_updated_endpoints(end_points, 'Logits')
    self.variables_to_restore = slim.get_variables_to_restore(exclude=[])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def build_graph(self):\n assert self.n_features is not None, 'Number of features is unknown. It can be set explicitly by .core.set_num_features'\n self.graph = tf.Graph()\n self.graph.seed = self.seed\n with self.graph.as_default():\n with tf.name_scope('learnable_params') as scope:\n self.init_learnable_params()\n with tf.name_scope('input_block') as scope:\n self.init_placeholders()\n with tf.name_scope(\"cosine_similarity\"):\n self.init_similarity_computation()\n with tf.name_scope('main_block') as scope:\n self.init_main_block()\n with tf.name_scope('optimization_criterion') as scope:\n self.init_regularization()\n self.init_loss()\n self.init_target()\n self.trainer = self.optimizer.minimize(self.target)\n self.init_all_vars = tf.global_variables_initializer()\n self.summary_op = tf.summary.merge_all()\n self.saver = tf.train.Saver()", "def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()", "def initialize(self):\n logging.info(\"Loading model.\")\n\n self._bleurt_graph = tf.Graph()\n with self._bleurt_graph.as_default():\n\n imported = tf.saved_model.load(self.checkpoint)\n bleurt_model_ops = imported.signatures[\"serving_default\"]\n self._bleurt_ops = bleurt_model_ops(\n input_ids=tf.compat.v1.placeholder(tf.int64, name=\"input_ids\"),\n input_mask=tf.compat.v1.placeholder(tf.int64, name=\"input_mask\"),\n segment_ids=tf.compat.v1.placeholder(tf.int64, name=\"segment_ids\"))\n\n init_op = tf.group(tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.tables_initializer())\n\n self.session = tf.compat.v1.Session(graph=self._bleurt_graph)\n self.session.run(init_op)\n\n logging.info(\"Done.\")", "def create_graph(self):\n self.graph = tf.Graph()\n model_type = self.options['model_type']\n optimiser_selected = self.options['optimizer']\n\n with self.graph.as_default():\n self.tf_dataset = tf.placeholder(tf.float32,\n shape=(None, self.options['num_steps'], self.input_dimensions))\n self.tf_labels = tf.placeholder(tf.float32, shape=(None, self.input_dimensions))\n self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')\n\n # Forward pass\n if model_type == 'rnn':\n self.predict = self.rnn_model(self.tf_dataset)\n elif model_type == 'lstm':\n self.predict = self.lstm_model(self.tf_dataset)\n else:\n raise NotImplementedError(\"Unimplemented RNN model keyword\")\n\n self.loss = tf.reduce_mean(tf.square(self.predict - self.tf_labels))\n\n if self.options['regularisation_coeff'] > 0.:\n # Add in L2 penalty for regularisation if required\n penalty = self.options['regularisation_coeff'] * sum(tf.nn.l2_loss(var)\n for var in tf.trainable_variables())\n self.loss += penalty\n\n if self.options['use_customised_optimizer'] is False:\n if optimiser_selected == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif optimiser_selected == 'grad':\n self.optimizer = 
tf.train.GradientDescentOptimizer(self.learning_rate)\n elif optimiser_selected == 'ada':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif optimiser_selected == 'rms':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n else:\n raise NotImplementedError(\"Unimplemented built-in optimiser keyword.\")\n else:\n self.optimizer = self.options['customized_optimizer']\n self.minimise = self.optimizer.minimize(self.loss)", "def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def _initialize_session(self):\n config = tf.ConfigProto()\n # restrict model GPU memory utilization to min required\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n tf_ver = int(tf.__version__.split('.')[1])\n if TF_VERSION <= 0.10:\n self.sess.run(tf.initialize_all_variables())\n logswriter = tf.train.SummaryWriter\n else:\n self.sess.run(tf.global_variables_initializer())\n logswriter = tf.summary.FileWriter\n self.saver = tf.train.Saver()\n self.summary_writer = logswriter(self.logs_path, graph=self.sess.graph) # change by ccx, add the graph_def", "def _build_graph(self):\n\n self.graph = tf.Graph()\n\n # set self.graph as default graph\n with 
self.graph.as_default():\n # # clear old variables\n # tf.reset_default_graph()\n\n # set random seed\n if self.random_seed is not None:\n tf.set_random_seed(self.random_seed)\n\n self._create_placeholders()\n self._create_variables()\n\n self._create_prediction()\n\n self._create_loss()\n self._create_optimizer()\n\n self._init = tf.global_variables_initializer()\n\n self.saver = tf.train.Saver()\n\n # create session\n self.sess = tf.Session(graph=self.graph)", "def init_target_net(self, sess):\n sess.run(self.init_target_net_op)", "def setup(self, context: ExecutionContext) -> BaseStep:\n if self.is_initialized:\n return self\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)\n\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')\n\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True", "def _init_session(self):\n self.sess = tf.Session(graph=self.g)\n self.sess.run(self.init)", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W1 = tf.get_variable(\"W1\", shape=[self.h, self.N], initializer=tf.truncated_normal_initializer)\n self.b1 = tf.get_variable(\"b1\", shape=[self.h, 1], initializer=tf.zeros_initializer)\n\n self.W2 = tf.get_variable(\"W2\", shape=[self.C, self.h], initializer=tf.truncated_normal_initializer)\n self.b2 = tf.get_variable(\"b2\", shape=[self.C, 1], initializer=tf.truncated_normal_initializer)\n\n self.z1 = tf.matmul(self.W1, self.X) + self.b1\n self.a1 = self.activation(self.z1)\n\n self.z2 = tf.matmul(self.W2, self.a1) + self.b2\n self.y_hat = tf.nn.softmax(self.z2, dim=0)\n\n self.l2_reg = tf.nn.l2_loss(self.W1) + tf.nn.l2_loss(self.W2)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z2, dim=0)) \\\n + self.beta * self.l2_reg\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def _initialize_eval_graph(self):\n self.X_test_tf = tf.placeholder(tf.int64, shape=[1, 3])\n\n self.table_entity_lookup_left = None\n self.table_entity_lookup_right = None\n self.table_reln_lookup = None\n\n all_entities_np = np.int64(np.arange(len(self.ent_to_idx)))\n\n if self.is_filtered:\n all_reln_np = np.int64(np.arange(len(self.rel_to_idx)))\n self.table_entity_lookup_left = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(all_entities_np,\n np.array(self.entity_primes_left, dtype=np.int64))\n , 0)\n self.table_entity_lookup_right = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(all_entities_np,\n np.array(self.entity_primes_right, dtype=np.int64))\n , 0)\n self.table_reln_lookup = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(all_reln_np,\n np.array(self.relation_primes, dtype=np.int64))\n , 0)\n\n # Create table to store train+test+valid triplet prime 
values(product)\n self.table_filter_lookup = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(np.array(self.filter_keys, dtype=np.int64),\n np.zeros(len(self.filter_keys), dtype=np.int64))\n , 1)\n\n corruption_entities = self.eval_config.get('corruption_entities', DEFAULT_CORRUPTION_ENTITIES)\n\n if corruption_entities == 'all':\n corruption_entities = all_entities_np\n elif isinstance(corruption_entities, list):\n corruption_entities = corruption_entities\n else:\n msg = 'Invalid type for corruption entities!!!'\n logger.error(msg)\n raise ValueError(msg)\n\n self.corruption_entities_tf = tf.constant(corruption_entities, dtype=tf.int64)\n\n self.out_corr, self.out_corr_prime = generate_corruptions_for_eval(self.X_test_tf,\n self.corruption_entities_tf,\n self.eval_config.get('corrupt_side',\n DEFAULT_CORRUPT_SIDE),\n self.table_entity_lookup_left,\n self.table_entity_lookup_right,\n self.table_reln_lookup)\n\n if self.is_filtered:\n # check if corruption prime product is present in dataset prime product\n self.presense_mask = self.table_filter_lookup.lookup(self.out_corr_prime)\n self.filtered_corruptions = tf.boolean_mask(self.out_corr, self.presense_mask)\n else:\n self.filtered_corruptions = self.out_corr\n\n self.concatinated_set = tf.concat([self.X_test_tf, self.filtered_corruptions], 0)\n\n e_s, e_p, e_o = self._lookup_embeddings(self.concatinated_set)\n self.scores_predict = self._fn(e_s, e_p, e_o)\n self.score_positive = tf.gather(self.scores_predict, 0)\n self.rank = tf.reduce_sum(tf.cast(self.scores_predict >= self.score_positive, tf.int32))", "def _setup_graph(self):\n sess = tf.Session()\n\n ### PROBLEM 1\n ### YOUR CODE HERE\n # raise NotImplementedError\n state_ph, action_ph, next_state_ph, reward_ph = self._setup_placeholders()\n next_state_pred = self._dynamics_func(state_ph, action_ph)\n loss, optimizer = self._setup_training(state_ph, next_state_ph, next_state_pred)\n\n # fit cost function\n reward_pred = self._reward_func(state_ph, action_ph, next_state_pred)\n reawrd_loss, reward_optimizer = self._reward_training(reward_ph, reward_pred)\n\n ### PROBLEM 2\n ### YOUR CODE HERE\n # self._rollout_state_ph = tf.placeholder(tf.float32, (1, self._state_dim), name='rollout_state_ph')\n best_action = self._setup_action_selection(state_ph)\n\n # BONUS\n self._best_action_cross_entropy = self._cross_entropy_action_selection(state_ph)\n\n sess.run(tf.global_variables_initializer())\n\n return sess, state_ph, action_ph, next_state_ph, reward_ph, \\\n next_state_pred, loss, optimizer, best_action, reward_pred, reawrd_loss, reward_optimizer", "def add_initializer_to_graph(self):\n with tf.device(self.params.device):\n with self.graph.as_default():\n with tf.compat.v1.variable_scope(\"initialization\") as scope:\n self.init_op = tf.group(tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.local_variables_initializer())", "def _init_session(self):\n self.sess = tf.Session(config=self.config, graph=self.g)\n self.sess.run(self.init)", "def setup(self):\n if not hasattr(logger, 'LOG_DIR'):\n raise RuntimeError(\"logger directory wasn't set!\")\n\n self._setup() # subclass will setup the graph\n\n describe_model()\n # some final operations that might modify the graph\n logger.info(\"Setup summaries ...\")\n self.summary_writer = tf.summary.FileWriter(logger.LOG_DIR, graph=tf.get_default_graph())\n # create an empty StatHolder\n self.stat_holder = StatHolder(logger.LOG_DIR)\n\n logger.info(\"Setup callbacks graph ...\")\n 
self.config.callbacks.setup_graph(weakref.proxy(self))\n self.config.session_init._setup_graph()\n\n def after_init(scaffold, sess):\n logger.info(\"Graph variables initialized.\")\n self.config.session_init._run_init(sess)\n\n scaffold = tf.train.Scaffold(\n init_op=tf.global_variables_initializer(),\n init_fn=after_init)\n logger.info(\"Finalize the graph, create the session ...\")\n self.monitored_sess = tf.train.MonitoredSession(\n session_creator=tf.train.ChiefSessionCreator(\n scaffold=scaffold, config=self.config.session_config),\n hooks=self.config.callbacks.get_hooks())\n self.hooked_sess = self.monitored_sess # just create an alias\n self.sess = self.monitored_sess._tf_sess() # expose the underlying session also", "def build(self):\n self.global_step = tf.train.get_or_create_global_step()\n self.build_network()\n if self.mode != tf.estimator.ModeKeys.PREDICT:\n self.build_losses()", "def build(self):\n self.global_step = tf.train.get_or_create_global_step()\n self.build_network()\n if self.mode != tf.estimator.ModeKeys.PREDICT:\n self.build_losses()", "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! 
Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)", "def initialize_session(self):\r\n self.logger.info(\"Initializing tf session\")\r\n self.sess = tf.Session()\r\n self.sess.run(tf.global_variables_initializer())\r\n self.saver = tf.train.Saver()", "def __init__(self, x, num_classes=15, is_training=False):\n\n super(resnet_v2_50, self).__init__()\n\n self.x = x\n self.num_classes = num_classes\n\n # populating the tensorflow graph\n with slim.arg_scope(arg_scopes_map['resnet_v2_50']()):\n net, end_points = networks_map['resnet_v2_50'](\n x, num_classes=num_classes,\n is_training=is_training, reuse=None)\n\n self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_50/logits')\n self.variables_to_restore = slim.get_variables_to_restore(exclude=[])", "def __init__(self, resnet_size, bottleneck, 
num_classes, \n num_filters, kernel_size, conv_stride, time_kernel_size,\n first_pool_size, first_pool_stride,\n block_sizes, block_strides,\n final_size, data_format=None,\n model_name_scope='resnet_model'):\n self.resnet_size = resnet_size\n\n if not data_format:\n data_format = (\n 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')\n\n self.resnet_version = 2\n\n self.bottleneck = bottleneck\n if bottleneck:\n self.block_fn = _bottleneck_block_v2\n else:\n self.block_fn = _building_block_v2\n\n self.data_format = data_format\n self.num_classes = num_classes\n self.num_filters = num_filters\n self.kernel_size = kernel_size\n self.conv_stride = conv_stride\n self.time_kernel_size = time_kernel_size\n self.first_pool_size = first_pool_size\n self.first_pool_stride = first_pool_stride\n self.block_sizes = block_sizes\n self.block_strides = block_strides\n self.final_size = final_size\n self.dtype = tf.float32\n self.pre_activation = True\n self.model_name_scope = model_name_scope", "def __init__(self, sess, network, learning_rate=0.1, discount_factor=0.99):\n self.sess = sess\n self.learning_rate = learning_rate\n self.discount_factor = discount_factor\n self.network = network\n self.defineUpdateOperations()\n self.init = tf.global_variables_initializer()\n self.initialize_variables()", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n self._add_placeholders()\n with tf.device(\"/gpu:0\"):\n self._add_seq2seq()\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n if self._hps.mode == 'train':\n self._add_train_op()\n self._summaries = tf.summary.merge_all()\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)\n \n print('#'*78,'\\nprinting model variables:')\n total_parameters = 0\n for variable in tf.trainable_variables():\n shape = variable.get_shape().as_list()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim\n print('{:}: shape={:}, variable_parameters={:}'.format(\n variable.name, shape, variable_parameters))\n total_parameters += variable_parameters\n print('total model parameters: {:}'.format(total_parameters))\n print('#'*78)", "def _init_vars(self):\n print \"Initializing session\"\n self.x = tf.placeholder(tf.float32, shape=[None, 784])\n self.y = tf.placeholder(tf.float32, shape=[None, 10])", "def _build_graph(self, seed):\n self.g = tf.Graph()\n with self.g.as_default():\n tf.set_random_seed(seed)\n self._placeholders()\n self._policy_nn()\n self._loss_train_op()\n self.init = tf.global_variables_initializer()", "def initialize_session(self):\n self.logger.info(\"Initializing tf session\")\n session_conf = tf.ConfigProto(\n allow_soft_placement=self.FLAGS.allow_soft_placement,\n log_device_placement=self.FLAGS.log_device_placement)\n self.session = tf.Session(config=session_conf)\n self.session.run(tf.global_variables_initializer())\n try: \n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.FLAGS.num_checkpoints)\n except:\n pass", "def initialize_session(self):\n self.logger.info(\"Initializing tf session\")\n self.sess = tf.compat.v1.Session()\n self.sess.run(tf.compat.v1.global_variables_initializer())\n self.saver = tf.compat.v1.train.Saver()", "def initialize_and_train(self):\n self.probabilities = tf.nn.softmax(self.hidden_layer3,name = 'test_probabilities')\n \n \"\"\"Calulates 10 probabilities based off of our input nodes, than calculates the error using\n cross entropy function, which turns those ten probabilities into an 
integer value. we then take \n        the mean of the cross entropy errors. Logits are the values to be used as input to softmax\"\"\"\n        self.error = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n                logits = self.hidden_layer3, labels = self.outputs, name = 'error'))\n        \"\"\"initialize all of our variables with actual numbers\"\"\"\n        with tf.Session() as session:\n            session.run(self.filters.initializer)\n            session.run(self.filters2.initializer)\n            session.run(self.weights.initializer)\n            session.run(self.weights2.initializer)\n            session.run(self.bias.initializer)\n            session.run(self.bias2.initializer)\n            session.run(self.weights3.initializer)\n            session.run(self.bias3.initializer)\n            \"\"\"create gradient descent function\"\"\"\n            self.train = tf.train.GradientDescentOptimizer(0.1).minimize(self.error)\n            \n            \"\"\"these are our two indexes that give us our batch size for gradient descent below\"\"\"\n            index1 = 0\n            index2 = 500\n            \"\"\"this for loop runs mini-batch gradient descent and prints error every ith iteration\"\"\"\n            for i in range(4500): \n                \"\"\"if our second index is less than the # of training sets, input proper index in feed_dict and run\"\"\"\n                if index2 < int(self.images.shape[0]): \n                    feed_dict = {self.inputs : self.images[index1:index2], self.outputs : self.labels[index1:index2]} \n                    session.run(self.train, feed_dict)\n                    iteration = i+1\n                    \"\"\"add 500 to each index and continue iterations\"\"\"\n                    index1 += 500\n                    index2 += 500\n                    \n                elif index2 >= int(self.images.shape[0]):\n                    \"\"\"if our second index is greater than or equal to # of training sets, \n                    input proper index in feed_dict and run\"\"\"\n                    index2 == int(self.images.shape[0])\n                    feed_dict = {self.inputs : self.images[index1:index2], self.outputs : self.labels[index1:index2]}\n                    session.run(self.train, feed_dict)\n                    iteration = i+1\n                    \"\"\"reset the index back to its original value and continue iterations\"\"\"\n                    index1 = 0\n                    index2 = 500 \n\n                if iteration % 100 == 0: \n                    print(index1,index2)\n                    print('#', iteration, 'error is:', session.run(self.error, feed_dict))\n            \"\"\"save the final results of our weights/filter variables as outputfile\"\"\"\n            self.saver = tf.train.Saver() \n            self.saver.save(session, \"/Users/bennicholl/Desktop/outputfile\")\n        \n        \"\"\"this below code is for tensorboard, a data visualization tool\"\"\"\n        \"\"\"open localhost:6006 on chrome, then type in hashtagged code block below in a terminal\"\"\"\n        #python -m tensorboard.main --logdir=\"/Users/bennicholl/Desktop/output3\"\n        with tf.Session() as session:\n            writer = tf.summary.FileWriter(\"/Users/bennicholl/Desktop/output3\", session.graph)\n            writer.close()", "def define_graph(self):\n        with tf.name_scope('discriminator'):\n            ##\n            # Setup scale networks. Each will make the predictions for images at a given scale.\n            ##\n\n            self.scale_nets = []\n            for scale_num in xrange(self.num_scale_nets):\n                with tf.name_scope('scale_net_' + str(scale_num)):\n                    scale_factor = 1. 
/ 2 ** ((self.num_scale_nets - 1) - scale_num)\n self.scale_nets.append(DScaleModel(scale_num,\n int(self.height * scale_factor),\n int(self.width * scale_factor),\n self.scale_conv_layer_fms[scale_num],\n self.scale_kernel_sizes[scale_num],\n self.scale_fc_layer_sizes[scale_num]))\n\n # A list of the prediction tensors for each scale network\n self.scale_preds = []\n for scale_num in xrange(self.num_scale_nets):\n self.scale_preds.append(self.scale_nets[scale_num].preds)\n\n ##\n # Data\n ##\n\n self.labels = tf.placeholder(tf.float32, shape=[None, 1], name='labels')\n\n ##\n # Training\n ##\n\n with tf.name_scope('training'):\n # global loss is the combined loss from every scale network\n self.global_loss = adv_loss(self.scale_preds, self.labels)\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n self.optimizer = tf.train.GradientDescentOptimizer(c.LRATE_D, name='optimizer')\n self.train_op = self.optimizer.minimize(self.global_loss,\n global_step=self.global_step,\n name='train_op')\n\n # add summaries to visualize in TensorBoard\n loss_summary = tf.summary.scalar('loss_D', self.global_loss)\n self.summaries = tf.summary.merge([loss_summary])", "def __init__(self, graph, weights,\n input_tensor_name=None,\n output_tensor_name=None):\n\n self.sess = tf.Session()\n new_saver = tf.train.import_meta_graph(graph)\n new_saver.restore(self.sess, weights)\n\n get_tensor = tf.get_default_graph().get_tensor_by_name\n # Get the initial place holder, else default\n if input_tensor_name:\n self.placeholder = get_tensor(input_tensor_name)\n else:\n self.placeholder = get_tensor('Placeholder:0')\n\n if output_tensor_name:\n self.softmax = get_tensor(output_tensor_name)\n else:\n self.softmax = get_tensor('Softmax:0')\n\n # Save trainables into params\n trainable_params = tf.trainable_variables()\n layers = {}\n params = {}\n\n def add_to_layer(name):\n try:\n layers[name] = get_tensor(\"{}:0\".format(name))\n except KeyError:\n try:\n layers[name] = get_tensor(\"{}/Relu:0\".format(name))\n except KeyError:\n print(\"Activation Not Found.\")\n pass\n\n for v in trainable_params:\n if 'weight' in v.name:\n name = v.name.split('/')[0]\n params[name] = v\n add_to_layer(name)\n\n # Pooling layers usually don't have a nice way of gathering.\n for n in tf.get_default_graph().as_graph_def().node:\n if 'pool' in n.name:\n v = get_tensor(\"{}:0\".format(n.name))\n name = n.name.split('/')[0]\n params[name] = v\n add_to_layer(name)\n\n # Get trainable params - 1 holds locations the other is a dummy script\n self.params = {}\n self._params = params\n self.layers = layers\n # Save empty dict into blobs\n self.blobs = {}", "def build_graph(self):\n # Print\n if self.verbose:\n print('Building Yolo Graph....')\n # Reset default graph\n tf.reset_default_graph()\n # Input placeholder\n self.x = tf.placeholder('float32', [None, 448, 448, 3])\n # conv1, pool1\n self.conv1 = self.conv_layer(1, self.x, 64, 7, 2)\n self.pool1 = self.maxpool_layer(2, self.conv1, 2, 2)\n # size reduced to 64x112x112\n # conv2, pool2\n self.conv2 = self.conv_layer(3, self.pool1, 192, 3, 1)\n self.pool2 = self.maxpool_layer(4, self.conv2, 2, 2)\n # size reduced to 192x56x56\n # conv3, conv4, conv5, conv6, pool3\n self.conv3 = self.conv_layer(5, self.pool2, 128, 1, 1)\n self.conv4 = self.conv_layer(6, self.conv3, 256, 3, 1)\n self.conv5 = self.conv_layer(7, self.conv4, 256, 1, 1)\n self.conv6 = self.conv_layer(8, self.conv5, 512, 3, 1)\n self.pool3 = self.maxpool_layer(9, self.conv6, 2, 2)\n # size reduced to 512x28x28\n 
# conv7 - conv16, pool4\n self.conv7 = self.conv_layer(10, self.pool3, 256, 1, 1)\n self.conv8 = self.conv_layer(11, self.conv7, 512, 3, 1)\n self.conv9 = self.conv_layer(12, self.conv8, 256, 1, 1)\n self.conv10 = self.conv_layer(13, self.conv9, 512, 3, 1)\n self.conv11 = self.conv_layer(14, self.conv10, 256, 1, 1)\n self.conv12 = self.conv_layer(15, self.conv11, 512, 3, 1)\n self.conv13 = self.conv_layer(16, self.conv12, 256, 1, 1)\n self.conv14 = self.conv_layer(17, self.conv13, 512, 3, 1)\n self.conv15 = self.conv_layer(18, self.conv14, 512, 1, 1)\n self.conv16 = self.conv_layer(19, self.conv15, 1024, 3, 1)\n self.pool4 = self.maxpool_layer(20, self.conv16, 2, 2)\n # size reduced to 1024x14x14\n # conv17 - conv24\n self.conv17 = self.conv_layer(21, self.pool4, 512, 1, 1)\n self.conv18 = self.conv_layer(22, self.conv17, 1024, 3, 1)\n self.conv19 = self.conv_layer(23, self.conv18, 512, 1, 1)\n self.conv20 = self.conv_layer(24, self.conv19, 1024, 3, 1)\n self.conv21 = self.conv_layer(25, self.conv20, 1024, 3, 1)\n self.conv22 = self.conv_layer(26, self.conv21, 1024, 3, 2)\n self.conv23 = self.conv_layer(27, self.conv22, 1024, 3, 1)\n self.conv24 = self.conv_layer(28, self.conv23, 1024, 3, 1)\n # size reduced to 1024x7x7\n # fc1, fc2, fc3\n self.fc1 = self.fc_layer(29, self.conv24, 512,\n flatten=True, linear=False)\n self.fc2 = self.fc_layer(\n 30, self.fc1, 4096, flatten=False, linear=False)\n self.fc3 = self.fc_layer(\n 31, self.fc2, 1470, flatten=False, linear=True)\n # Run session\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n self.saver.restore(self.sess, self.weightFile)\n # Print\n print('Graph built.')", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W = tf.get_variable(\"W\", shape=[self.C, self.N], initializer=tf.truncated_normal_initializer)\n self.b = tf.get_variable(\"b\", shape=[self.C, 1], initializer=tf.zeros_initializer)\n\n self.z = tf.matmul(self.W, self.X) + self.b\n self.y_hat = tf.nn.softmax(self.z, dim=0)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z, dim=0))\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def build_graph(self):\n\n ##### Build Graph #####\n baseModel.build_graph(self)\n\n ##### Create Optimization #####\n with tf.variable_scope(\"optimize\"):\n self.add_loss()\n self.add_accuracy()\n self.initialize_learning_rate()\n self.initialize_optimization()\n\n ##### History and Checkpoints #####\n self.hasTrained = False\n self._lastSaved = collections.defaultdict(None)\n self.history = collections.defaultdict(list)\n self.saver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestLossSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestAccSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n\n logging.basicConfig(level=logging.INFO)\n log_handler = logging.FileHandler(\"log.txt\")\n logging.getLogger().addHandler(log_handler)\n\n self.summaries = tf.summary.merge_all()", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = 
Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(self):\n super(Model, self).__init__()\n\n self.batch_size = 200\n self.hidden_size = 264\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)\n\n self.dense_1 = tf.keras.layers.Dense(self.hidden_size, activation='relu')\n self.dense_2 = tf.keras.layers.Dense(self.hidden_size, activation='relu')", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k * 2],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k * 2],\n initializer=self.initializer)", "def __init__(self):\n self.classes_to_detect = ['person']\n # Load lebel_map\n self._load_label(PATH_TO_LABELS, NUM_CLASSES, use_disp_name=True)\n\n # Load Tensorflow model into memory\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(GRAPH_PATH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with self.detection_graph.as_default():\n self.sess = tf.Session(graph=self.detection_graph, config=tf_config)\n # Definite input and output Tensors for detection_graph\n self.image_tensor = self.detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n # Each box represents a part of the image where a particular\n # object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n # Each score represent how level of confidence for each of\n # the objects. 
Score is shown on the result image, together\n # with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name(\n 'num_detections:0')\n\n logger.info('Model graph loaded.')", "def model_setup(self):\n self.input_a = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_A\")\n self.input_b = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_B\")\n\n self.fake_pool_A = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_A\")\n self.fake_pool_B = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_B\")\n\n self.global_step = slim.get_or_create_global_step()\n\n self.num_fake_inputs = 0\n\n self.learning_rate = tf.placeholder(tf.float32, shape=[], name=\"lr\")\n\n inputs = {\n 'images_a': self.input_a,\n 'images_b': self.input_b,\n 'fake_pool_a': self.fake_pool_A,\n 'fake_pool_b': self.fake_pool_B,\n }\n\n outputs = model.get_outputs(\n inputs, network=self._network_version, skip=self._skip)\n\n self.prob_real_a_is_real = outputs['prob_real_a_is_real']\n self.prob_real_b_is_real = outputs['prob_real_b_is_real']\n self.fake_images_a = outputs['fake_images_a']\n self.fake_images_b = outputs['fake_images_b']\n self.prob_fake_a_is_real = outputs['prob_fake_a_is_real']\n self.prob_fake_b_is_real = outputs['prob_fake_b_is_real']\n\n self.cycle_images_a = outputs['cycle_images_a']\n self.cycle_images_b = outputs['cycle_images_b']\n\n self.prob_fake_pool_a_is_real = outputs['prob_fake_pool_a_is_real']\n self.prob_fake_pool_b_is_real = outputs['prob_fake_pool_b_is_real']", "def build(self, mode):\n assert mode in ['train', 'eval']\n self.mode = mode\n self._setup_misc(mode)\n self._setup_images_and_labels()\n self._build_graph(self.images, self.labels, mode)\n\n self.init = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k],\n initializer=self.initializer)", "def start(self):\n self.sess = tf.Session()\n tf.global_variables_initializer().run(session=self.sess)", "def _make_graph(self):\n # this resets the whole default graph for tensorflow\n tf.reset_default_graph()\n # inputs/outputs:\n # each input example will be two np.hstacked 3x3 matrices, flattened\n # (initial state s and final state s' after selecting action a)\n self.input = tf.placeholder(tf.float32, [None, 3 * 6])\n self.layers, self.weights, self.biases = \\\n make_fully_connected_network(\n input_layer=self.input,\n architecture=self.architecture,\n activation=self.activation\n )\n self.output = self.layers[-1]\n self.observed = tf.placeholder(tf.float32, shape=[None, 1])\n # MSE loss function\n self.loss = tf.reduce_sum(tf.square(self.output - self.observed))\n if self.penalty:\n penalty_tensor = tf.add_n([self.penalty_function(x) for x in self.weights])\n self.loss = self.loss + self.penalty * penalty_tensor\n self.optimizer = (self.optimizer_algo(learning_rate=self.learning_rate, **self.optimizer_params)\n 
.minimize(self.loss))", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def init_machine_learning(self):\n\t\ttry:\n\t\t\tprint(\"debut du chargement! \")\n\n\t\t\tself.x = tf.placeholder(tf.float32, [None, 784])\n\n\t\t\tself.session = tf.Session()\n\t\t\tnew_saver = tf.train.import_meta_graph(\"./modeles/avancer/model_avancer.meta\")\n\t\t\tnew_saver.restore(self.session, tf.train.latest_checkpoint('./'))\n\t\t\tall_vars = tf.get_collection('vars')\n\n\t\t\t#self.y_conv = all_vars[3]\n\t\t\t#self.keep_prob = all_vars[4]\n\n\t\t\t#for v in all_vars:\n\t\t\t#\tv_ = self.session.run(v)\n\t\t\t#\tprint(v_)\n\n\t\t\tprint(\"chargement terminer\")\n\t\t\t\n\t\texcept:\n\t\t\t\n\t\t\tprint(\"le chargement a echouer ! \\n creation d'un nouveau modele !\")\n\t\t\tself.mnist = input_data.read_data_sets(self.option[\"ch_mnist\"], one_hot=True)\n\n\t\t\tself.session = tf.InteractiveSession()\n\t\t\t#creation des variables\n\t\t\tW_conv1 = self.weight_variable([5, 5, 1, 32])\n\t\t\tb_conv1 = self.bias_variable([32])\n\n\t\t\t# Placeholder\n\t\t\tself.x = tf.placeholder(tf.float32, [None, 784])\n\t\t\ty_ = tf.placeholder(tf.float32, [None, 10])\n\n\t\t\t# Reshape\n\t\t\tx_image = tf.reshape(self.x , [-1,28,28,1])\n\n\t\t\th_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1)\n\t\t\th_pool1 = self.max_pool_2x2(h_conv1)\n\n\t\t\tW_conv2 = self.weight_variable([5, 5, 32, 64])\n\t\t\tb_conv2 = self.bias_variable([64])\n\n\t\t\th_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2) + b_conv2)\n\t\t\th_pool2 = self.max_pool_2x2(h_conv2)\n\n\t\t\tW_fc1 = self.weight_variable([7 * 7 * 64, 1024])\n\t\t\tb_fc1 = self.bias_variable([1024])\n\n\t\t\th_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n\t\t\th_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\t\t\t\n\t\t\tself.keep_prob = tf.placeholder(tf.float32)\n\t\t\th_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)\n\n\t\t\tW_fc2 = self.weight_variable([1024, 10])\n\t\t\tb_fc2 = self.bias_variable([10])\n\n\t\t\tself.y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n\n\t\t\t\t\t\n\t\t\tcross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.y_conv, y_))\n\t\t\ttrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\t\t\tcorrect_prediction = tf.equal(tf.argmax(self.y_conv,1), tf.argmax(y_ ,1))\n\t\t\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\t\t\tself.session.run(tf.global_variables_initializer())\n\n\t\t\tprint(\"sauvegarde variable\")\n\t\t\ttf.add_to_collection(\"vars\", h_fc1_drop)\n\t\t\ttf.add_to_collection(\"vars\", W_fc2)\n\t\t\ttf.add_to_collection(\"vars\", b_fc2)\n\t\t\ttf.add_to_collection(\"vars\", self.y_conv)\n\t\t\ttf.add_to_collection(\"vars\", self.keep_prob)\n\n\t\t\tprint(\"lancement antrainement modele\")\n\t\t\t\n\t\t\tfor i in range(1000):\n\t\t\t\tbatch = self.mnist.train.next_batch(50)\n\t\t\t\tif i%100 == 0:\n\t\t\t\t\ttrain_accuracy = accuracy.eval(feed_dict={self.x : batch[0], y_ : batch[1], self.keep_prob : 1.0})\n\t\t\t\t\tprint(\"step %d, training accuracy %g\"%(i, train_accuracy))\n\t\t\t\ttrain_step.run(feed_dict={self.x : batch[0], y_ : batch[1], self.keep_prob: 0.5})\n\n\t\t\tbatchSize = 5000\n\t\t\tfor i in range(len(self.mnist.train.labels) // batchSize):\n\t\t\t\tbat = self.mnist.test.next_batch(100)\n\t\t\t\tprint(\"test accuracy %g\" % accuracy.eval(feed_dict={self.x : bat[0], y_: bat[1], self.keep_prob: 1.0}))\n\t\t\t\n\t\t\t#sauvegarde des données\n\t\t\tsaver = tf.train.Saver()\n\t\t\tsave_path = 
saver.save(self.session, \"./modeles/avancer/model_avancer\")\n\t\t\tprint(\"Model saved in file: %s\" % save_path)", "def init(self,sess):\n if not os.path.isfile(\\\n \"./Models/\" + self.mod_name + \".ckpt.meta\"):\n sess.run(tf.global_variables_initializer())\n return 0\n else:\n if self.gen_only:\n sess.run(tf.global_variables_initializer())\n self.load(sess)\n return 1", "def __init__(self, config):\n self.config = config\n etat.UsesTFSession.__init__(self)\n\n # Get path to model\n self.config.download_model_if_necessary()\n model_path = self.config.model_path\n\n # Load model\n self._prefix = \"main\"\n self._graph = etat.load_graph(model_path, prefix=self._prefix)\n self._sess = None\n\n # Load class labels\n labels_map = etal.load_labels_map(self.config.labels_path)\n self._class_labels = etal.get_class_labels(labels_map)\n self._num_classes = len(self._class_labels)\n\n # Get network\n network_name = self.config.network_name\n network_fn = nf.get_network_fn(\n network_name, num_classes=self._num_classes, is_training=False\n )\n self.img_size = network_fn.default_image_size\n\n # Get input operation\n self._input_op = self._graph.get_operation_by_name(\n self._prefix + \"/\" + self.config.input_name\n )\n\n # Get feature operation, if necessary\n features_name = None\n if self.config.generate_features:\n if self.config.features_name:\n features_name = self.config.features_name\n elif network_name in _DEFAULT_FEATURES_NAMES:\n features_name = _DEFAULT_FEATURES_NAMES[network_name]\n if features_name is not None:\n self._features_op = self._graph.get_operation_by_name(\n self._prefix + \"/\" + features_name\n )\n else:\n self._features_op = None\n\n # Get output operation\n if self.config.output_name:\n output_name = self.config.output_name\n else:\n output_name = _DEFAULT_OUTPUT_NAMES.get(network_name, None)\n if output_name is None:\n raise ValueError(\n \"`output_name` was not provided and network `%s` was not \"\n \"found in default outputs map\" % network_name\n )\n self._output_op = self._graph.get_operation_by_name(\n self._prefix + \"/\" + output_name\n )\n\n # Setup preprocessing\n self._transforms = self._make_preprocessing_fcn(\n network_name, self.config.preprocessing_fcn\n )\n self._preprocess = True\n\n self._last_features = None\n self._last_probs = None", "def __init__(self, model_path, img_width, img_height, gpu_fraction=1.0):\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=['input_1:0', 'cumsum_values_1:0'])\n\n self.img_width = img_width\n self.img_height = img_height", "def build_graph(self):\n\t\tself.n_hidden = 100\n\t\tself.weights_hidden = tf.get_variable(\"weights_hidden\", [self.state_size, self.n_hidden], initializer = tf.random_normal_initializer())\n\t\tself.bias_hidden = tf.get_variable(\"bias_hidden\", [self.n_hidden], initializer = tf.constant_initializer(0.1))\n\n\t\tself.weights_out = tf.get_variable(\"weights_out\", [self.n_hidden, self.action_size], initializer = tf.random_normal_initializer())\n\t\tself.bias_out = tf.get_variable(\"bias_out\", 
[self.action_size], initializer = tf.constant_initializer(0.1))", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def initialize_variables(self):\n self.sess.run(self.init)", "def __init__(self, num_steps, model_load_path, num_test_rec):\n\n self.global_step = 0\n self.num_steps = num_steps\n self.num_test_rec = num_test_rec\n\n self.sess = tf.Session()\n self.summary_writer = tf.train.SummaryWriter(c.SUMMARY_SAVE_DIR, graph=self.sess.graph)\n\n if c.ADVERSARIAL:\n print 'Init discriminator...'\n self.d_model = DiscriminatorModel(self.sess,\n self.summary_writer,\n c.TRAIN_HEIGHT,\n c.TRAIN_WIDTH,\n c.SCALE_CONV_FMS_D,\n c.SCALE_KERNEL_SIZES_D,\n c.SCALE_FC_LAYER_SIZES_D)\n\n print 'Init generator...'\n self.g_model = GeneratorModel(self.sess,\n self.summary_writer,\n c.TRAIN_HEIGHT,\n c.TRAIN_WIDTH,\n c.FULL_HEIGHT,\n c.FULL_WIDTH,\n c.SCALE_FMS_G,\n c.SCALE_KERNEL_SIZES_G)\n\n print 'Init variables...'\n self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)\n self.sess.run(tf.global_variables_initializer())\n\n # if load path specified, load a saved model\n if model_load_path is not None:\n self.saver.restore(self.sess, model_load_path)\n print 'Model restored from ' + model_load_path", "def __init__(self):\n self.sess = tf.Session()\n vocab_path = os.path.join(params.data_dir, \"vocab%d\" % params.vocab_size)\n self.vocab, self.rev_vocab = data_utils.initialize_vocabulary(vocab_path)\n self.model = model_utils.create_model(self.sess, True)\n self.model.batch_size = 1 # Respond 1 sentence at a time.", "def init_resnet(num_classes: int) -> nn.Module:\n model = models.resnet50(pretrained=True)\n num_features = model.fc.in_features\n model.fc = nn.Linear(num_features, num_classes)\n\n return model", "def setup(self, params, training=True, **kwargs):\n\n tf.reset_default_graph()\n\n return super().setup(params=params, training=training, **kwargs)", "def _init_model(self, forrest):\n rels = self.get_rels(forrest)\n self._model = RDPModel(rels)", "def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.update_lr = FLAGS.update_lr\n self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())\n self.auto_lr = tf.placeholder_with_default(FLAGS.auto_lr, ())\n \n self.classification = False\n self.test_num_updates = test_num_updates\n self.dim_auto = 2 #This should be able to be arbitrary\n if auto:\n self.real_input = 39 # This is square root of the total (its a kernel)\n #self.real_output = 40#self.dim_output\n self.real_output = 39*39 # This should be the complete dimension out. \n self.dim_input = 3*self.dim_auto #= 3*self.dim_auto \n self.dim_output = self.dim_auto\n #This is from each. 
\n #if auto: self.dim_input, self.dim_output = self.dim_auto, self.dim_auto #If auto, pass in/out the dimension of the latent (auto_\n if FLAGS.datasource == 'sinusoid':\n self.dim_hidden = [40, 40,40]\n self.loss_func = mse\n self.forward = self.forward_fc\n self.construct_weights = self.construct_fc_weights\n elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':\n self.loss_func = xent\n self.classification = True\n if FLAGS.conv:\n self.dim_hidden = FLAGS.num_filters\n self.forward = self.forward_conv\n self.construct_weights = self.construct_conv_weights\n else:\n self.dim_hidden = [256, 128, 64, 64]\n self.forward=self.forward_fc\n self.construct_weights = self.construct_fc_weights\n if FLAGS.datasource == 'miniimagenet':\n self.channels = 3\n else:\n self.channels = 1\n self.img_size = int(np.sqrt(self.dim_input/self.channels))\n else:\n raise ValueError('Unrecognized data source.')", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def __init__(self, params=None):\n if isinstance(params, SSDParams):\n self.params = params\n else:\n self.params = SSDNet.default_params\n # if cfgs.DATA_FORMAT == \"NHWC\":\n # self.images_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, None, None, 3],\n # name=\"input_images\")\n # else:\n # self.images_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 3, None, None],\n # name=\"input_images\")\n\n # self.labels_batch = tf.placeholder(dtype=tf.int32, shape=[None, None, cfgs.NUM_CLASS+1], name=\"gt_labels\")\n # self.bboxes_batch = tf.placeholder(dtype=tf.float32, shape=(None, None, None, 4), name=\"gt_bboxes\")\n # self.scores_batch = tf.placeholder(dtype=tf.float32, shape=(None, None, 1), name=\"gt_scores\")\n\n self.global_step = tf.train.get_or_create_global_step()", "def __init__(self, saved_model_path, pipeline_config_path):\r\n\r\n saved_model_path = os.path.realpath(saved_model_path)\r\n assert os.path.exists(saved_model_path)\r\n\r\n # Use tf2onnx to convert saved model to an initial ONNX graph.\r\n graph_def, inputs, outputs = tf_loader.from_saved_model(saved_model_path, None, None, \"serve\",\r\n [\"serving_default\"])\r\n log.info(\"Loaded saved model from {}\".format(saved_model_path))\r\n with tf.Graph().as_default() as tf_graph:\r\n tf.import_graph_def(graph_def, name=\"\")\r\n with tf_loader.tf_session(graph=tf_graph):\r\n onnx_graph = tfonnx.process_tf_graph(tf_graph, input_names=inputs, output_names=outputs, opset=11)\r\n onnx_model = optimizer.optimize_graph(onnx_graph).make_model(\"Converted from {}\".format(saved_model_path))\r\n self.graph = gs.import_onnx(onnx_model)\r\n assert self.graph\r\n log.info(\"TF2ONNX graph created successfully\")\r\n\r\n # Fold constants via ONNX-GS that TF2ONNX may have missed.\r\n self.graph.fold_constants()\r\n \r\n # Pipeline config parsing.\r\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\r\n with tf.io.gfile.GFile(pipeline_config_path, 'r') as f:\r\n text_format.Merge(f.read(), pipeline_config)\r\n\r\n # If your model is SSD, get characteristics accordingly from pipeline.config file.\r\n if pipeline_config.model.HasField(\"ssd\"):\r\n # Getting model characteristics.\r\n self.model = str(pipeline_config.model.ssd.feature_extractor.type)\r\n self.height = int(pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height)\r\n self.width = 
int(pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width)\r\n self.first_stage_nms_score_threshold = float(pipeline_config.model.ssd.post_processing.batch_non_max_suppression.score_threshold)\r\n self.first_stage_nms_iou_threshold = float(pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold)\r\n self.first_stage_max_proposals = int(pipeline_config.model.ssd.post_processing.batch_non_max_suppression.max_detections_per_class)\r\n\r\n # If your model is Faster R-CNN get it's characteristics from pipeline.config file.\r\n elif pipeline_config.model.HasField(\"faster_rcnn\"): \r\n self.model = str(pipeline_config.model.faster_rcnn.feature_extractor.type) \r\n # There are two types of image_resizers, select accordingly from pipeline.config file.\r\n if pipeline_config.model.faster_rcnn.image_resizer.HasField(\"fixed_shape_resizer\"):\r\n self.height = int(pipeline_config.model.faster_rcnn.image_resizer.fixed_shape_resizer.height)\r\n self.width = int(pipeline_config.model.faster_rcnn.image_resizer.fixed_shape_resizer.width)\r\n elif pipeline_config.model.faster_rcnn.image_resizer.HasField(\"keep_aspect_ratio_resizer\"): \r\n self.height = int(pipeline_config.model.faster_rcnn.image_resizer.keep_aspect_ratio_resizer.max_dimension)\r\n self.width = self.height\r\n else:\r\n log.info(\"Image resizer config is not supported\")\r\n sys.exit(1)\r\n\r\n # Getting model characteristics\r\n self.first_stage_nms_score_threshold = float(pipeline_config.model.faster_rcnn.first_stage_nms_score_threshold) \r\n self.first_stage_nms_iou_threshold = float(pipeline_config.model.faster_rcnn.first_stage_nms_iou_threshold)\r\n self.first_stage_max_proposals = int(pipeline_config.model.faster_rcnn.first_stage_max_proposals)\r\n self.initial_crop_size = int(pipeline_config.model.faster_rcnn.initial_crop_size)\r\n self.second_score_threshold = float(pipeline_config.model.faster_rcnn.second_stage_post_processing.batch_non_max_suppression.score_threshold)\r\n self.second_iou_threshold = float(pipeline_config.model.faster_rcnn.second_stage_post_processing.batch_non_max_suppression.iou_threshold)\r\n\r\n else: \r\n log.info(\"Given pipeline.config file is not supported\")\r\n sys.exit(1)\r\n\r\n #print(self.model)\r\n #print(self.height)\r\n #print(self.width)\r\n #print(self.first_stage_nms_score_threshold)\r\n #print(self.first_stage_nms_iou_threshold)\r\n #print(self.first_stage_max_proposals)\r\n #print(self.initial_crop_size)\r\n #print(self.second_score_threshold)\r\n #print(self.second_iou_threshold)\r\n #print(self.first_stage_max_proposals)\r\n\r\n self.batch_size = None", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n\n self._add_placeholders()\n\n with tf.device(\"/gpu:%d\"%(config.gpu_selection)):\n self._add_seq2seq()\n\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n\n\n if self._mode == 'train':\n self._add_train_op()\n\n self._summaries = tf.summary.merge_all()\n\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)", "def create(self):\n # 1st Layer: Conv -> norm -> ReLu\n conv1 = self.conv(x=self.X, stride_y=1, stride_x=1, padding='SAME', name='conv1')\n norm1 = lrn(conv1, 2, 1e-04, 0.75, name='norm1')\n # Apply relu function\n relu1 = tf.nn.relu(norm1)\n\n # 2st Layer: Conv -> norm -> ReLu\n conv2 = self.conv(x=relu1, stride_y=1, stride_x=1, padding='SAME', name='conv2')\n norm2 = lrn(conv2, 2, 1e-04, 0.75, name='norm2')\n # Apply relu function\n relu2 = 
tf.nn.relu(norm2)\n\n pool2 = tf.nn.max_pool(relu2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # 3st Layer: Conv -> norm -> ReLu\n conv3 = self.conv(x=pool2, stride_y=1, stride_x=1, padding='SAME', name='conv3')\n norm3 = lrn(conv3, 2, 1e-04, 0.75, name='norm3')\n # Apply relu function\n relu3 = tf.nn.relu(norm3)\n\n # 4st Layer: Conv -> norm -> ReLu\n conv4 = self.conv(x=relu3, stride_y=1, stride_x=1, padding='SAME', name='conv4')\n norm4 = lrn(conv4, 2, 1e-04, 0.75, name='norm4')\n # Apply relu function\n relu4 = tf.nn.relu(norm4)\n\n pool4 = tf.nn.max_pool(relu4, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # 5st Layer: Conv -> norm -> ReLu\n conv5 = self.conv(x=pool4, stride_y=1, stride_x=1, padding='SAME', name='conv5')\n norm5 = lrn(conv5, 2, 1e-04, 0.75, name='norm5')\n # Apply relu function\n relu5 = tf.nn.relu(norm5)\n\n # 6st Layer: Conv -> norm -> ReLu\n conv6 = self.conv(x=relu5, stride_y=1, stride_x=1, padding='SAME', name='conv6')\n norm6 = lrn(conv6, 2, 1e-04, 0.75, name='norm6')\n # Apply relu function\n relu6 = tf.nn.relu(norm6)\n\n pool6 = tf.nn.avg_pool(relu6, ksize=[1, 4, 4, 1],\n strides=[1, 4, 4, 1],\n padding='SAME')\n\n flattened = tf.reshape(pool6, [-1, 128 * 4])\n self.fc7 = self.fc(flattened, name='fc7')", "def _setup_init(self):\n with tf.variable_scope(\"output\", reuse=True):\n assert self.q_values is not None\n self.policy_proba = tf.nn.softmax(self.q_values)", "def build_inference_graph(self):\n self.build_train_graph()", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def __init__(self, **kwargs):\n super().__init__()\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)", "def build_graph(self, save_model_path):\n if os.path.exists(\"{}.meta\".format(save_model_path)):\n logger.info(\"Graph existed, ready to be reloaded...\")\n else:\n logger.info(\"No graph can be loaded, so create a new graph...\")\n tf.reset_default_graph()\n # placeholders\n x = self.neural_net_image_input((32, 32, 3))\n y = self.neural_net_label_input(10)\n keep_prob = self.neural_net_keep_prob_input()\n\n # model\n logits_out = self.conv_net(x, keep_prob)\n\n # Name logits_out\n logits_out = tf.identity(logits_out, name='logits')\n\n # loss and optimizer\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_out, labels=y), name='cost')\n optimzer = tf.train.AdamOptimizer(name='optimizer').minimize(loss)\n\n # Accuracy\n correct_pred = tf.equal(tf.argmax(y, axis=1), tf.argmax(logits_out, axis=1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\n # print(type(tf.Variable(1)))\n saver = tf.train.Saver()\n if not os.path.exists('./savedModel'):\n os.mkdir('./savedModel')\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.save(sess, './savedModel/cnn-model')", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 
64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def model_initializer():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten())\n # model.add(tf.keras.layers.Dense(128, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(64, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(32, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n\n model.compile(optimizer='rmsprop',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model", "def reset(self):\n self.close()\n self._sess = tf.Session(graph=self._graph)\n self._sess.run(self._initializer)", "def _init_tf(self):\n assert not self.tf_init_done, \\\n \"this class is not designed to be initialised twice\"\n self.sess = tf.get_default_session()\n self.optimiser = tf.train.AdamOptimizer(learning_rate=self.lr)\n\n # maps problem names to (obs var, q-value var) tuples\n self.obs_qv_inputs = {}\n losses = []\n loss_parts = None\n batch_sizes = []\n for problem in self.problems:\n this_obs_var, this_q_values, this_loss, this_loss_parts \\\n = self._instantiate_net(problem)\n self.obs_qv_inputs[problem.name] = (this_obs_var, this_q_values)\n this_batch_size = tf.shape(this_obs_var)[0]\n losses.append(this_loss)\n batch_sizes.append(tf.cast(this_batch_size, tf.float32))\n if loss_parts is None:\n loss_parts = this_loss_parts\n else:\n # we care about these parts because we want to display them to\n # the user (e.g. 
how much of my loss is L2 regularisation\n # loss?)\n assert len(loss_parts) == len(this_loss_parts), \\\n 'diff. loss breakdown for diff. probs. (%s vs %s)' \\\n % (loss_parts, this_loss_parts)\n # sum up all the parts\n new_loss_parts = []\n for old_part, new_part in zip(loss_parts, this_loss_parts):\n assert old_part[0] == new_part[0], \\\n \"names (%s vs. %s) don't match\" % (old_part[0],\n new_part[0])\n to_add = new_part[1] * tf.cast(this_batch_size, tf.float32)\n new_loss_parts.append((old_part[0], old_part[1] + to_add))\n loss_parts = new_loss_parts\n self.op_loss \\\n = sum(l * s for l, s in zip(losses, batch_sizes)) \\\n / sum(batch_sizes)\n # this is actually a list of (name, symbolic representation) pairs for\n # components of the loss\n self.loss_part_ops = [(name, value / sum(batch_sizes))\n for name, value in loss_parts]\n\n # Next bit hairy because we want combined grads (and also want to split\n # them out for TensorBoard to look at). Really this is similar to\n # self.op_train = self.optimiser.minimize(loss).\n params = self.weight_manager.all_weights\n # do a check that set(params) is the same as\n param_set = set(params)\n for problem in self.problems:\n their_param_set = set(problem.policy.get_params(trainable=True))\n assert their_param_set == param_set, \\\n \"policy for %s has weird params\" % problem.name\n\n grads_and_vars = self.optimiser.compute_gradients(\n self.op_loss, var_list=params)\n # see https://stackoverflow.com/a/43486487 for gradient clipping\n gradients, variables = zip(*grads_and_vars)\n gradients = list(gradients)\n # for grad, var in grads_and_vars:\n # gradients[0] = tf.Print(gradients[0], [tf.norm(grad), tf.norm(var)], 'grad/var norm for %s:' % var.name)\n grads_and_vars = zip(gradients, variables)\n self.op_train = self.optimiser.apply_gradients(\n grads_and_vars=grads_and_vars)\n for g, v in grads_and_vars:\n tf.summary.histogram(\n 'weight-grads/' + v.name, g, collections=['sl-hists'])\n for slot in self.optimiser.get_slot_names():\n slot_var = self.optimiser.get_slot(v, slot)\n if slot_var is not None:\n dest_name = 'slots-' + slot + '/' + v.name\n tf.summary.histogram(\n dest_name, slot_var, collections=['sl-hists'])\n\n # \"weights\" is probably set by some code somewhere deep in RLLab\n # TODO: this is probably not the best idea. Maybe do weight hist stuff\n # *here*?\n weight_op = tf.summary.merge_all('weights')\n # 'summaries_f_prob' (for activations) is set up in\n # CategoricalMLPPolicy.__init__. 
Again I stuck it deep in RLLab because\n # I'm an idiot.\n act_op = tf.summary.merge_all('sl-activations')\n tf.summary.merge([act_op, weight_op], collections=['sl-hists'])\n self.op_summary = tf.summary.merge_all('sl-hists')\n\n # tensorboard ops\n self._log_ops = {}\n\n self.sess.run(tf.global_variables_initializer())\n\n self.tf_init_done = True", "def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def __init__(self, sess, save_folder, file_name, **kwargs):\n\n tf.logging.info('Building graph for low dimensional score metric')\n self._build_graph(**kwargs)\n\n self.build_summaries()\n tf.logging.info('Summary operator made')\n\n self.sess = sess\n self.initialize_model(save_folder, file_name, sess)\n tf.logging.info('Model initialized')", "def build_graph():\n os.environ['CUDA_VISIBLE_DEVICES']= '0'\n\n # frozen_model = '/home/kevin/Codes/DeepNet/log/20180419_221132/frozen_model.pb'\n # frozen_model = '/home/kevin/Downloads/deeplabv3_cityscapes_train/frozen_inference_graph.pb'\n # frozen_model = '/home/kevin/Codes/EnvNet/RUNS/used3/frozen_model.pb'\n frozen_model = '/home/kevin/Codes/DeepNet/log/20180716_212035/frozen_model1.pb'\n graph = load_graph(frozen_model)\n\n for op in graph.get_operations():\n print(op.name)\n\n ## model_envnet/frozen_model.pb\n image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n ## model_deeplab/frozen_inference_graph.pb\n # image_pl = graph.get_tensor_by_name('ImageTensor:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n # ## model_deepnet/frozen_model.pb\n # image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n config = tf.ConfigProto() \n config.gpu_options.per_process_gpu_memory_fraction = 0.5\n sess = tf.Session(graph=graph,config=config)\n\n return image_pl, pred_seg, sess", "def _initialize_local_and_global_variables(self):\n variables_initialization_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n self.sess.run(variables_initialization_op)", "def __init__(self, MY_GRAPH_PATH):\n self.graph = tf.Graph()\n\n graph_def = None\n with tf.gfile.FastGFile(MY_GRAPH_PATH, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n if graph_def is None:\n raise RuntimeError('Cannot find inference graph in tar archive.')\n\n with self.graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n self.sess = tf.Session(graph=self.graph)", "def build_tf_graph(self):\n raise NotImplementedError", "def compile(self, seed=42):\n ops.reset_default_graph()\n self._log_params() # Small trick to get all the variables and log them\n # Create the graph object\n with tf.device(\"/gpu:0\"):\n logger.info(\"Building graph...\")\n tf.set_random_seed(seed)\n self.global_step = 
tf.get_variable(name=\"global_step\",\n shape=[],\n dtype='int32',\n initializer=tf.constant_initializer(0),\n trainable=False)\n self._create_placeholders()\n self._setup_graph_def()\n\n self._add_scalar_summary(self.loss)\n if self.eval_metric is not None:\n self._add_scalar_summary(self.eval_metric)\n self._is_graph_build = True", "def __init__(self, state_size, action_size, scope='global', layer_size=np.array([400, 300])):\n self.state_size = state_size\n self.action_size = action_size\n self.scope = scope\n with tf.variable_scope(scope):\n self.inputs = tf.placeholder(shape=[None, state_size], dtype=tf.float32)\n self.layers = [self.inputs]\n for i in range(len(layer_size)):\n self.layers.append(slim.fully_connected(self.layers[i], int(layer_size[i]), activation_fn=tf.nn.relu))\n\n self.policyLayer = slim.fully_connected(self.layers[-1], action_size, activation_fn=tf.nn.tanh)\n # Get the index of the highest output from the neural network\n self.maxOutputNode = tf.argmax(self.policyLayer, 1)", "def __init__(self, model='facenet-20180402-114759.pb'):\n print('Load Frozen Graph')\n\n with tf.gfile.FastGFile(os.path.join(os.path.dirname(__file__), \"weights\", model),\n 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n self.graph = tf.get_default_graph()\n\n print('Ended loading frozen graph')", "def __init__(self, model_path, gpu_fraction=1.0,\n input_name = 'input_1:0',\n output_name = 'output_node0:0',\n optimize = True,\n optimizer_args = None):\n\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n\n if optimize:\n if type(output_name) == list:\n sensitive_nodes = output_name\n else:\n sensitive_nodes = [output_name]\n graph_def = optimizeGraph(graph_def,\n sensitive_nodes,\n optimizer_args)\n if type(output_name) == list:\n return_elements = [input_name, *output_name]\n tensors = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n # The first is an input\n self.input_tensor = tensors[0]\n # The rest are outputs\n self.output_tensor = tensors[1:]\n else:\n return_elements = [input_name, output_name]\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n\n self.input_shape = self.input_tensor.get_shape().as_list()", "def fit_eval(self, sess):\n tfconfig = tf.ConfigProto()\n tfconfig.gpu_options.allow_growth = True\n self.sess = tf.Session(config=tfconfig)\n self.sess.run(tf.global_variables_initializer())\n self.new_saver=tf.train.import_meta_graph(self.meta_graph_path)\n self.new_saver.restore(sess,self.model_path)\n #graph = tf.get_default_graph()\n self.X_inputs=tf.get_collection(\"model.X_inputs\")[0]\n self.y_inputs=tf.get_collection(\"model.y_inputs\")[0]\n self.y_pred_meta=tf.get_collection(\"model.y_pred\")[0]\n self.lr=tf.get_collection(\"lr\")[0]\n self.batch_size=tf.get_collection(\"batch_size\")[0]\n self.keep_prob=tf.get_collection(\"keep_prob\")[0]\n self.attention=tf.get_collection(\"attention\")[0]\n self.correct_prediction_bilstm= tf.equal(tf.cast(tf.argmax(self.attention, 1), tf.int32), tf.reshape(self.y_inputs, 
[-1]))\n self.correct_prediction_attention = tf.equal(tf.cast(tf.argmax(self.y_pred_meta, 1), tf.int32), tf.reshape(self.y_inputs, [-1]))\n self.accuracy_attention = tf.reduce_mean(tf.cast(self.correct_prediction_attention, tf.float32))\n self.accuracy_bilstm = tf.reduce_mean(tf.cast(self.correct_prediction_bilstm, tf.float32))\n saver = tf.train.Saver(max_to_keep=3)\n saver.restore(sess, tf.train.latest_checkpoint(self.model.checkpoint_path))\n X_batch, y_batch = self.batch_gen.__next__()\n test_fetches = [self.attention, self.accuracy_attention, self.accuracy_bilstm, self.y_pred_meta]\n feed_dict = {self.X_inputs:X_batch, self.y_inputs:y_batch, self.lr:self._lr, self.batch_size:10, self.keep_prob:1.0}\n _att_pred, _att_acc, _bilstm_acc , _bilstm_pred = sess.run(test_fetches, feed_dict)\n print(_att_pred,_bilstm_pred, _att_acc, _bilstm_acc)\n return _att_pred,_bilstm_pred, _att_acc, _bilstm_acc", "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def _build_model(self):\n\n with tf.variable_scope(\"Matchnet\", reuse=tf.AUTO_REUSE):\n # For determining the runtime shape\n x_shp = tf.shape(self.x_in)\n\n # -------------------- Network archintecture --------------------\n # Build graph\n print(\"Building Graph\")\n self.logits = build_graph(self.x_in, self.is_training, self.config)\n # ---------------------------------------------------------------\n\n # Turn into weights for each sample\n weights = tf.nn.relu(tf.tanh(self.logits))\n\n # Make input data (num_img_pair x num_corr x 4)\n xx = tf.transpose(tf.reshape(\n self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))\n\n # Create the matrix to be used for the eight-point algorithm\n X = tf.transpose(tf.stack([\n xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],\n xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],\n xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])\n ], axis=1), (0, 2, 1))\n print(\"X shape = {}\".format(X.shape))\n wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X\n print(\"wX shape = {}\".format(wX.shape))\n XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)\n print(\"XwX shape = {}\".format(XwX.shape))\n\n # Recover essential matrix from self-adjoing eigen\n e, v = tf.self_adjoint_eig(XwX)\n self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))\n # Make unit norm just in case\n self.e_hat /= tf.norm(self.e_hat, axis=1, keep_dims=True)", "def build_graph(self):\n train_graph = tf.Graph()\n opts = self._options\n with train_graph.as_default():\n self.__inputs, self.__doc_inputs, self.__labels, self.__lr = self._get_inputs()\n embed, word_embeddings, combined_embed_vector_length = self._get_embedding_layer(\n self.__inputs, self.__doc_inputs)\n\n norm_w = tf.sqrt(tf.reduce_sum(tf.square(word_embeddings), 1, keep_dims=True))\n self.__normalized_word_embeddings = word_embeddings / norm_w\n\n weights = tf.Variable(\n tf.truncated_normal((self.vocab_size, combined_embed_vector_length),\n stddev=1.0 / math.sqrt(combined_embed_vector_length))\n )\n biases = tf.Variable(tf.zeros(self.vocab_size))\n\n if opts.loss == 'softmax':\n loss = tf.nn.sampled_softmax_loss(weights=weights,\n biases=biases,\n labels=self.__labels,\n inputs=embed,\n num_sampled=opts.negative_sample_size,\n num_classes=opts.vocab_size)\n tf.summary.scalar(\"Softmax loss\", loss)\n else:\n loss = tf.nn.nce_loss(weights=weights,\n biases=biases,\n labels=self.__labels,\n 
inputs=embed,\n num_sampled=opts.negative_sample_size,\n num_classes=opts.vocab_size)\n tf.summary.scalar(\"NCE loss\", loss)\n\n self.__cost = tf.reduce_mean(loss)\n\n if opts.train_method == 'Adam':\n self.__optimizer = tf.train.AdamOptimizer(self.__lr).minimize(self.__cost)\n else:\n self.__optimizer = tf.train.GradientDescentOptimizer(self.__lr).minimize(self.__cost)\n\n self.__summary = tf.summary.merge_all()\n\n self._session = tf.Session(graph=train_graph)\n self.saver = tf.train.Saver()\n return self", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def __init__(self):\n self._graph = tf.Graph()\n self._session = tf.compat.v1.Session(graph=self._graph)\n\n # This lock is for multi-threaded contexts where multiple threads\n # share the same EvalSavedModel.\n #\n # Locking is required in the case where there are multiple threads using\n # the same EvalMetricsGraph. 
Because the metrics variables are part of the\n # session, and all threads share the same session, without a lock, the\n # \"reset-update-get\" steps may not be atomic and there can be races.\n #\n # Having each thread have its own session would also work, but would\n # require a bigger refactor.\n # TODO(b/131727905): Investigate whether it's possible / better to have\n # each thread have its own session.\n self._lock = threading.Lock()\n\n # Variables that need to be populated.\n\n # The names of the metric.\n self._metric_names = []\n\n # Ops associated with reading and writing the metric variables.\n self._metric_value_ops = []\n self._metric_update_ops = []\n self._metric_variable_assign_ops = []\n\n # Nodes associated with the metric variables.\n self._metric_variable_nodes = []\n\n # Placeholders and feed input for the metric variables.\n self._metric_variable_placeholders = []\n self._perform_metrics_update_fn_feed_list = []\n self._perform_metrics_update_fn_feed_list_keys = []\n\n # OrderedDicts that map features, predictions, and labels keys to their\n # tensors.\n self._features_map = {}\n self._predictions_map = {}\n self._labels_map = {}\n\n # Ops to set/update/reset all metric variables.\n self._all_metric_variable_assign_ops = None\n self._all_metric_update_ops = None\n self._reset_variables_op = None\n\n # Callable to perform metric update.\n self._perform_metrics_update_fn = None\n\n # OrderedDict produced by graph_ref's load_(legacy_)inputs, mapping input\n # key to tensor value.\n self._input_map = None\n\n self._batch_size = (\n beam.metrics.Metrics.distribution(constants.METRICS_NAMESPACE,\n 'batch_size'))\n self._batch_size_failed = (\n beam.metrics.Metrics.distribution(constants.METRICS_NAMESPACE,\n 'batch_size_failed'))\n\n try:\n self._construct_graph()\n except (RuntimeError, TypeError, ValueError,\n tf.errors.OpError) as exception:\n general_util.reraise_augmented(exception, 'Failed to create graph.')", "def train_on_one_batch(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n with tf.device('/gpu:0'):\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess: #config=tf.ConfigProto(log_device_placement=True)\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 2), save_model_path)\n\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()", "def load_resnet(self, resnet_dir, keep_last=False):\n ckpt = tf.train.latest_checkpoint(resnet_dir)\n with tf.Session(config=self.config) as sess:\n # init model\n init = [tf.global_variables_initializer(), tf.local_variables_initializer()]\n sess.run(init)\n if keep_last:\n restore_var = [v for v in tf.global_variables() if 'global_step' not in v.name and 'mode' not in v.name]\n else:\n restore_var = [v for v in tf.global_variables() if 'global_step' not in v.name and 'mode' not in v.name\n and 'conv6' not in v.name]\n loader = tf.train.Saver(var_list=restore_var)\n # load model\n self.load(ckpt, sess, loader)", "def __init__(self, img_rows=400, img_cols=400, vgg_weights=\"imagenet\", inference_only=False, net_name='default', gpus=1, vgg_device=None):\n \n # Settings\n self.img_rows = img_rows\n self.img_cols = img_cols\n self.img_overlap = 30\n self.inference_only = inference_only\n self.net_name = net_name\n self.gpus = gpus\n self.vgg_device = vgg_device\n\n # Scaling for VGG input\n 
self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n \n #get PowerSpect_CMB\n reader = np.zeros((2507,))\n fp = open('./data/COM_PowerSpect_CMB-base-plikHM-TTTEEE-lowl-lowE-lensing-minimum-theory_R3.01.txt')\n \n for i,line in enumerate(fp):\n if i >= 1:\n reader[i-1] = line.split()[1]\n \n fp.close() \n readers = np.log(reader)\n self.cl = K.constant(readers)\n # Assertions\n assert self.img_rows >= 256, 'Height must be >256 pixels'\n assert self.img_cols >= 256, 'Width must be >256 pixels'\n\n # Set current epoch\n self.current_epoch = 0\n \n # VGG layers to extract features from (first maxpooling layers, see pp. 7 of paper)\n self.vgg_layers = [3, 6, 10]\n\n # Instantiate the vgg network\n if self.vgg_device:\n with tf.device(self.vgg_device):\n self.vgg = self.build_vgg(vgg_weights)\n else:\n self.vgg = self.build_vgg(vgg_weights)\n \n # Create UNet-like model\n if self.gpus <= 1:\n self.model, inputs_mask= self.build_pconv_unet()\n self.compile_pconv_unet(self.model, inputs_mask) \n else:\n with tf.device(\"/cpu:0\"):\n self.model, inputs_mask = self.build_pconv_unet()\n self.model = multi_gpu_model(self.model, gpus=self.gpus)\n self.compile_pconv_unet(self.model, inputs_mask)", "def __init__(self, model):\r\n self._tensorflow_session = model._tensorflow_session\r\n self._model = model", "def build_resnet101(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 4):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b3_feats = temp\n \n res4a_feats = self.basic_block(res3b3_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 23):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b22_feats = temp\n\n res5a_feats = self.basic_block(res4b22_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def _setup(self):\n\n # caffe-tensorflow/convert.py can only run with Python2. 
Since the default encoding format of Python2 is ASCII\n # but the default encoding format of Python3 is UTF-8, it will raise an error without 'encoding=\"latin1\"'\n weight_dict = np.load(self.vgg16_path, encoding=\"latin1\").item()\n\n scopes = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3',\n 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3']\n for scope in scopes:\n with tf.variable_scope(scope.split('_')[0] + '/' + scope, reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w_init_op = weights.assign(weight_dict[scope]['weights'])\n b_init_op = biases.assign(weight_dict[scope]['biases'])\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)\n\n with tf.variable_scope('fc6', reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w = weight_dict['fc6']['weights']\n b = weight_dict['fc6']['biases']\n w = np.reshape(w, (7, 7, 512, 4096))\n w = w[0:-1:2, 0:-1:2, :, 0:-1:4]\n b = b[0:-1:4]\n w_init_op = weights.assign(w)\n b_init_op = biases.assign(b)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)\n\n with tf.variable_scope('fc7', reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w = weight_dict['fc7']['weights']\n b = weight_dict['fc7']['biases']\n w = np.reshape(w, (1, 1, 4096, 4096))\n w = w[:, :, 0:-1:4, 0:-1:4]\n b = b[0:-1:4]\n w_init_op = weights.assign(w)\n b_init_op = biases.assign(b)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)", "def __init__(self, in_seq_length, out_seq_length, hidden_dim,\n n_epochs=1500, learning_rate=0.0001,\n save_file='./forecastnet.ckpt', model='dense'):\n # Initialize variables passed\n self.in_seq_length =in_seq_length\n self.out_seq_length = out_seq_length\n self.hidden_dim = hidden_dim\n self.n_epochs = n_epochs\n self.learning_rate = learning_rate\n self.save_file = save_file\n self.model = model\n\n # Reset the default graph\n tf.reset_default_graph()\n\n # Set random seed to keep consistent results\n # tf.set_random_seed(1)\n\n # Create the placeholders for the TensorFlow graph\n self.X, self.Y, self.is_training = self.create_placeholders()\n\n # Build the TensorFlow graph\n self.build_graph()\n\n # Define the tensorflow optimizer. 
Use an AdamOptimizer.\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n\n # Print the number of trainable parameters of the model\n print('Trainable variables = ', np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))\n print('')", "def build_graph(self):\n\n\n\n self.inputs.append( #uint8\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='input/lr')) \n\n self.label.append(\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='label/hr'))", "def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, 
weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def _build_graph(self, inputs):\n\n # inputs contains a list of input variables defined above\n input_from_sensor1, input_from_sensor2, label = inputs\n print \"ok\"\n print input_from_sensor1\n # In tensorflow, inputs to convolution function are assumed to be\n # NHWC. Add a single channel here.\n #image = tf.expand_dims(image, 3)\n\n #image = image * 2 - 1 # center the pixels values at zero\n # The context manager `argscope` sets the default option for all the layers under\n # this context. 
Here we use 32 channel convolution with shape 3x3\n\n sensor1 = Sequential('sensor1', input_from_sensor1) \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)() \n\n print sensor1\n\n sensor2 = Sequential('sensor2', input_from_sensor2) \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)()\n\n output = Connect('cloud', [sensor1, sensor2], \"inner_product\") \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)()\n\n tf.nn.softmax(output, name='prob') # a Bx10 with probabilities\n\n #g = tf.get_default_graph()\n #for v in g.as_graph_def().node:\n # print v.name\n\n # a vector of length B with loss of each sample\n cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=label)\n cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss\n\n correct = tf.cast(tf.nn.in_top_k(output, label, 1), tf.float32, name='correct')\n accuracy = tf.reduce_mean(correct, name='accuracy')\n\n # This will monitor training error (in a moving_average fashion):\n # 1. write the value to tensosrboard\n # 2. write the value to stat.json\n # 3. print the value after each epoch\n train_error = tf.reduce_mean(1 - correct, name='train_error')\n summary.add_moving_summary(train_error, accuracy)\n\n # Use a regex to find parameters to apply weight decay.\n # Here we apply a weight decay on all W (weight matrix) of all fc layers\n wd_cost = tf.multiply(1e-5,\n regularize_cost('fc.*/W', tf.nn.l2_loss),\n name='regularize_loss')\n\n self.cost = tf.add_n([wd_cost, cost], name='total_cost')\n\n summary.add_moving_summary(cost, wd_cost, self.cost)\n\n # monitor histogram of all weight (of conv and fc layers) in tensorboard\n summary.add_param_summary(('.*/W', ['histogram', 'rms']))" ]
[ "0.71776015", "0.6739433", "0.6669668", "0.660558", "0.65882456", "0.6516973", "0.6443085", "0.6423372", "0.64015436", "0.63999057", "0.6339511", "0.63393587", "0.6328483", "0.631329", "0.63033473", "0.62763274", "0.62757164", "0.627243", "0.6259759", "0.6259759", "0.6252365", "0.6238946", "0.6238248", "0.6228969", "0.62260884", "0.62216014", "0.61992395", "0.6188734", "0.6182232", "0.6179242", "0.6167892", "0.6159257", "0.6145693", "0.61373615", "0.61370456", "0.6135909", "0.61055654", "0.6098847", "0.60771847", "0.6072279", "0.6068658", "0.6060275", "0.60361105", "0.60349673", "0.6022777", "0.59995234", "0.5975665", "0.59745455", "0.59699357", "0.59679097", "0.59655076", "0.5944577", "0.59443724", "0.5942477", "0.5916095", "0.5910781", "0.5903678", "0.59029704", "0.5899744", "0.5894725", "0.5892079", "0.58791643", "0.5875119", "0.5865222", "0.585884", "0.58562565", "0.5853644", "0.58522123", "0.5844606", "0.5841884", "0.5839001", "0.5832439", "0.5830157", "0.582558", "0.58121085", "0.58112323", "0.5808803", "0.5806357", "0.580387", "0.57945246", "0.5793703", "0.5792321", "0.5785907", "0.57851183", "0.57754564", "0.57655543", "0.57646304", "0.57595575", "0.57500404", "0.57434803", "0.574348", "0.57424086", "0.5735521", "0.57310385", "0.5730755", "0.57296336", "0.5727137", "0.57193434", "0.57140994", "0.57094336", "0.570941" ]
0.0
-1
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None):
    if sess is None:
        sess = tf.get_default_session()
    assert sess is not None
    saver = tf.train.Saver(self.variables_to_restore)
    saver.restore(sess, checkpoint_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)", "def load_weights(self, path=None):\n\n if path is None:\n path = self.checkpoints_dir\n\n self.model.load_weights(tf.train.latest_checkpoint(path))\n logging.info(f'\\tWeights loaded from {path}')", "def load_weights(self, model_name: str, checkpoint: int, path: str = './models/'):\n path_to_model = path + model_name + '/checkpoint_' + str(checkpoint) + '/model_weights'\n self.model.load_weights(path_to_model)", "def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])", "def _load_checkpoint_to_net(config, network):\n if config.existed_ckpt:\n if config.existed_ckpt.endswith(\".npz\"):\n weights = np.load(config.existed_ckpt)\n else:\n weights = load_checkpoint(config.existed_ckpt)\n for param in network.trainable_params():\n weights_name = param.name\n if weights_name not in weights:\n raise ValueError(f\"Param {weights_name} is not found in ckpt file.\")\n\n if isinstance(weights[weights_name], Parameter):\n param.set_data(weights[weights_name].data)\n elif isinstance(weights[weights_name], Tensor):\n param.set_data(Tensor(weights[weights_name].asnumpy(), config.dtype))\n elif isinstance(weights[weights_name], np.ndarray):\n param.set_data(Tensor(weights[weights_name], config.dtype))\n else:\n param.set_data(weights[weights_name])\n else:\n for param in network.trainable_params():\n name = param.name\n value = param.data\n if isinstance(value, Tensor):\n if name.endswith(\".gamma\"):\n param.set_data(one_weight(value.asnumpy().shape))\n elif name.endswith(\".beta\") or name.endswith(\".bias\"):\n if param.data.dtype == \"Float32\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float16)))\n else:\n if param.data.dtype == \"Float32\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float16)))", "def load_weights(self, filename):\n checkpoint = torch.load(filename)\n if not checkpoint['input_size'] == self.state_size:\n print(f\"Error when loading weights from checkpoint {filename}: input size {checkpoint['input_size']} doesn't match state size of agent {self.state_size}\")\n return None\n if not checkpoint['output_size'] == self.action_size:\n print(f\"Error when loading weights from checkpoint {filename}: output size {checkpoint['output_size']} doesn't match action space size of agent {self.action_size}\")\n return None\n my_actor_hidden_layers = [each.out_features for each in self.actor_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['actor_hidden_layers'] == my_actor_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: actor hidden layers {checkpoint['actor_hidden_layers']} don't match agent's actor hidden layers {my_actor_hidden_layers}\")\n return None\n 
my_critic_hidden_layers = [each.out_features for each in self.critic_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['critic_hidden_layers'] == my_critic_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: critic hidden layers {checkpoint['critic_hidden_layers']} don't match agent's critic hidden layers {my_critic_hidden_layers}\")\n return None\n self.actor_local.load_state_dict(checkpoint['actor_state_dict'])\n self.critic_local.load_state_dict(checkpoint['critic_state_dict'])", "def load_weights(self, filepath):\n self.model.load_weights(filepath)", "def load_weights(self, weight_file):\r\n self.model.load_weights(weight_file)", "def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if \"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))", "def load_checkpoint(checkpoint_path):\n flat_checkpoint_dict = flatten_checkpoint(\n parse_checkpoint(checkpoint_path), keep_empty_nodes=True)\n return flat_checkpoint_dict", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def load_initial_weights(self, sess, weights_path, SKIP_LAYER):\r\n # Load the weights into memory\r\n weights_dict = np.load(weights_path, encoding='bytes').item()\r\n\r\n # list of all assignment operators\r\n # Loop over all layer names stored in the weights dict\r\n for op_name in weights_dict:\r\n\r\n # Check if layer should be trained from scratch\r\n if op_name not in SKIP_LAYER:\r\n\r\n with tf.variable_scope('model/source/' + op_name, reuse=True):\r\n\r\n # Assign weights/biases to their corresponding tf variable\r\n for data in weights_dict[op_name]:\r\n\r\n # Biases\r\n if len(data.shape) == 1:\r\n var = tf.get_variable('biases', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))\r\n\r\n # Weights\r\n else:\r\n var = tf.get_variable('weights', trainable=False)\r\n # print(toMagenta(var.name))\r\n sess.run(var.assign(data))", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load_checkpoint(checkpoint, model, 
optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_weights(self, file):\n self.model.load_weights(file)\n return", "def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")", "def load_weights(self):\n\n reader = pywrap_tensorflow.NewCheckpointReader(self._tf_model_prefix)\n var_to_shape_map = reader.get_variable_to_shape_map()\n data = dict()\n for name in var_to_shape_map:\n tensor = reader.get_tensor(name)\n data[name] = tensor\n\n print (\"Tensorflow checkpoint file [%s] loaded successfully. [%d] variables loaded.\"\n % (self._tf_model_prefix, len(data)))\n return data", "def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def load_model_weights(self, filename):\n self.model.load_weights(filename)", "def load_weights(model, fpath):\n state = torch.load(fpath)\n model.load_state_dict(state['state_dict'])", "def load_tf_weights_in_bert(model, tf_checkpoint_path):\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n print(\"Loading a TensorFlow models in PyTorch, 
requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load(self, filename):\n self.model.load_weights(filename)", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def load_ckpt(model,\n weight_path,\n **kargs):\n #model.set_state_dict(state_dict)\n\n if not osp.isfile(weight_path):\n raise IOError(f'{weight_path} is not a checkpoint file')\n #state_dicts = load(weight_path)\n\n logger = get_logger(\"paddlevideo\")\n state_dicts = paddle.load(weight_path)\n if \"VisionTransformer\" in str(model): # For TimeSformer case\n tmp = pretrain_vit_param_trans(model, state_dicts, kargs['num_patches'], kargs['seg_num'], kargs['attention_type'])\n else:\n tmp = {}\n total_len = len(model.state_dict())\n with tqdm(total=total_len, position=1, bar_format='{desc}', desc=\"Loading weights\") as desc:\n for item in tqdm(model.state_dict(), total=total_len, position=0):\n name = item\n desc.set_description('Loading %s' % name)\n if name not in state_dicts: # Convert from non-parallel model\n if str('backbone.' + name) in state_dicts:\n tmp[name] = state_dicts['backbone.' 
+ name]\n else: # Convert from parallel model\n tmp[name] = state_dicts[name]\n time.sleep(0.01)\n ret_str = \"loading {:<20d} weights completed.\".format(len(model.state_dict()))\n desc.set_description(ret_str)\n model.set_state_dict(tmp)", "def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))", "def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 'model!')", "def load_tf_weights_in_t5(model, config, tf_checkpoint_path):\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n tf_weights[name] = array\n\n for txt_name in names:\n name = txt_name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n if \"_slot_\" in name[-1]:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n pointer = model\n array = tf_weights[txt_name]\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n # elif scope_names[0] == 'scale':\n # pointer = getattr(pointer, 'weight')\n # elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':\n # pointer = getattr(pointer, 'bias')\n # elif scope_names[0] == 'squad':\n # pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if scope_names[0] not in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n if scope_names[0] != \"embedding\":\n logger.info(\"Transposing numpy weight of shape {} for {}\".format(array.shape, name))\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array.astype(np.float32))\n tf_weights.pop(txt_name, None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(\", \".join(tf_weights.keys())))\n # 
logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n return model", "def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(self, checkpoint_path, silent = False):\n ckc = CheckpointCache(checkpoint_path)\n\n if not self.built:\n dymmy_inputs = np.array([[0,1,2]])\n self([dymmy_inputs])\n \n symbolic_weights = self.trainable_weights + self.non_trainable_weights\n \n variable_keys = [self._clean_weight_name(symbolic_weight.name) for symbolic_weight in symbolic_weights]\n variable_keys = [self._convert_variable_name(key) for key in variable_keys]\n\n unloaded_keys = set(ckc.keys()) - set(variable_keys)\n if not silent:\n print('unused keys:', unloaded_keys)\n \n values = [ckc.get_values(key) for key in variable_keys]\n \n name_value_pair = []\n\n for weight, value in zip(symbolic_weights, values):\n if weight.shape != value.shape:\n raise ValueError(f'The shape of {weight.name} is {weight.shape} but shape from checkpoint is {value.shape}.')\n if weight.dtype != value.dtype:\n raise ValueError(f'The type of {weight.name} is {weight.dtype} but type from checkpoint is {value.dtype}.')\n \n name_value_pair.append((weight, value))\n \n K.batch_set_value(name_value_pair)\n \n return unloaded_keys", "def load_network(self, sess, filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += '/Models/'\n dir_path += filename\n saver = tf.train.Saver()\n saver.restore(sess, dir_path)", "def load_checkpoint(filename: str) -> CheckpointData:\n return torch.load(filename)", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def load_graph(filename):\n with tf.gfile.GFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])", "def load_weights_file(self, file_path):\n\n # Load the weights\n self._cnn_model.load_weights(file_path)", "def load_checkpoint(filename, from_gpu=True):\r\n assert os.path.exists(filename)\r\n if from_gpu:\r\n return torch.load(filename)\r\n else:\r\n return torch.load(filename, map_location=lambda storage, loc: storage)", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_weights(self, the_path):\n self.model.load_state_dict(torch.load(the_path))", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def load(self, filename):\n\n c = torch.load(filename)\n\n if type(c) is dict:\n sd = c['state_dict']\n self.net.load_state_dict(sd)\n if 'monitors' in c: # Remove the branching eventually\n self.monitors = c['monitors']\n else:\n self.monitors = {'loss_train': c['train_monitor'], 'loss_val': c['val_monitor'],\n 'accu_train': MetricHistory(), 'accu_val': MetricHistory()}\n if 'optimizer' in c: # Remove the branching eventually\n self.optimizer.load_state_dict(c['optimizer'])\n else:\n raise RuntimeError('Unsupported checkpoint. 
(Not a dict)')\n\n self.parent = filename\n self.last_checkpoint = filename\n self.start_epoch = self.monitors['loss_train'].num_epochs", "def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)", "def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_initial_weights(self, session, SKIP_LAYER=[]):\n if SKIP_LAYER:\n self.SKIP_LAYER = SKIP_LAYER\n \n layer_params = np.load(self.WEIGHTS_PATH, encoding = \"latin1\").item()\n \n # Loop over all layer names stored in the weights dict\n for op_name in layer_params:\n # Check if the layer is one of the layers that should be reinitialized\n if op_name not in self.SKIP_LAYER:\n with tf.variable_scope(op_name, reuse = True):\n # Loop over list of weights/biases and assign them to their corresponding tf variable\n print(\"load layer params:%s\" % op_name)\n for key in layer_params[op_name]:\n data = layer_params[op_name][key]\n # Biases\n if len(data.shape) == 1:\n var = tf.get_variable('biases', trainable = False)\n session.run(var.assign(data))\n # Weights\n else:\n var = tf.get_variable('weights', trainable = False)\n session.run(var.assign(data))", "def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] 
Failed to find a checkpoint, Exception!\")\n return False, 0", "def _load_weights(self):\n self.npz_weights = np.load(self._weight_file)\n self._load_byte_embedding()\n self._load_cnn_weights()\n self._load_highway()\n self._load_projection()", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def _load(checkpoint_path):\n state_dict, optimizer_state = dg.load_persistables(dirname=checkpoint_path)\n return state_dict, optimizer_state", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def load_weights_infer(checkpoint_path, model):\n try:\n # catalyst weights\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")[\"model_state_dict\"]\n except:\n # anything else\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(state_dict, strict=True)\n except:\n # for clf + seg for seg only prediction\n print(f\"Non-strict loading of weights from {checkpoint_path}\")\n model.load_state_dict(state_dict, strict=False)\n model.eval()\n return model", "def load_graph(filename):\n\twith tf.gfile.FastGFile(filename, 'rb') as f:\n\t\tgraph_def = tf.GraphDef()\n\t\tgraph_def.ParseFromString(f.read())\n\t\ttf.import_graph_def(graph_def, name='')", "def load_pretrained_net_weights(net, ckpt_path):\n print(\"Loading Model: \", ckpt_path)\n print('')\n\n net.load_weights(ckpt_path).expect_partial()", "def load_model(sess, meta_file, checkpoint_file):\n saver = tf.train.import_meta_graph(meta_file)\n saver.restore(sess, checkpoint_file)\n \n configs = tf.get_collection('configs')\n pvars = tf.get_collection('placeholders')\n \n model_settings = dict()\n for c in configs:\n name = c.name.split(':')[0]\n model_settings[name] = sess.run(c)\n \n model_vars = dict()\n for p in pvars:\n name = p.name.split(':')[0]\n model_vars[name] = p\n model_vars['probs'] = tf.get_collection('probs')[0]\n \n return model_settings, model_vars", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def load_pretrained_weights(model, weight_path):\n checkpoint = load_checkpoint(weight_path)\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n\n model_dict = model.state_dict()\n new_state_dict = OrderedDict()\n matched_layers, discarded_layers = [], []\n\n for k, v in state_dict.items():\n if k.startswith('module.'):\n k = k[7:] # discard module.\n\n if k in model_dict and model_dict[k].size() == v.size():\n new_state_dict[k] = v\n matched_layers.append(k)\n else:\n discarded_layers.append(k)\n\n 
model_dict.update(new_state_dict)\n model.load_state_dict(model_dict)\n\n if len(matched_layers) == 0:\n warnings.warn(\n 'The pretrained weights \"{}\" cannot be loaded, '\n 'please check the key names manually '\n '(** ignored and continue **)'.format(weight_path)\n )\n #else:\n #print(\n # 'Successfully loaded pretrained weights from \"{}\"'.\n # format(weight_path)\n #)\n #if len(discarded_layers) > 0:\n # print(\n # '** The following layers are discarded '\n # 'due to unmatched keys or layer size: {}'.\n # format(discarded_layers)\n # )", "def _load_local_weights(self, h5file):\n for name, layer in self._layers_to_save.items():\n self._load_layer_weights(layer, name, h5file)", "def load_weights(self, file_path):\n self.model.load_weights(file_path + '/policy_network.h5')\n print(\"\\nrestored weights of the policy network.\\n\")", "def load_checkpoint(path: str, use_cuda: bool = True) -> dict:\n assert os.path.isfile(path), \"Checkpoint %s not found\" % path\n checkpoint = torch.load(path, map_location=\"cuda\" if use_cuda else \"cpu\")\n return checkpoint", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def load(self, path, nr_of_saves, test_it=-1):\n with self.graph.as_default():\n print(\"Loading networks...\")\n checkpoint_dir = os.path.join(os.environ['APPROXIMATOR_HOME'], path, \"network-\"+str(test_it))\n self.saver = tf.train.Saver(max_to_keep=nr_of_saves+1)\n try:\n self.saver.restore(self.sess, checkpoint_dir)\n print(\"Loaded: {}\".format(checkpoint_dir))\n except Exception:\n if test_it <= 0:\n # Initialize the variables\n self.sess.run(tf.global_variables_initializer())\n print(\"Failed! 
Initializing the network variables...\")\n else:\n raise", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load(self):\n checkpoint = torch.load(self.checkpoint_path,\n map_location=self.net.device)\n self.load_state_dict(checkpoint)\n del checkpoint", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def load_weights(self, weights):\n weight = np.load(weights)\n return weight", "def load_checkpoint(checkpoint_directory,\n session):\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # filter variables if needed.\n print(variables)\n saver_ob = tf.train.Saver(variables, max_to_keep=0)\n os.makedirs(checkpoint_directory, exist_ok=True)\n # verify if we don't have a checkpoint saved directly\n step = 0\n ckpt = tf.train.get_checkpoint_state(checkpoint_directory)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n model_checkpoint_path = ckpt.model_checkpoint_path\n saver_ob.restore(session, model_checkpoint_path)\n step = int(model_checkpoint_path.rsplit('-', 1)[1])\n print('Model loaded = ', step)\n return saver_ob, step", "def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n logger.info(f'load model from: {self.pretrained}')\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n pass\n else:\n raise TypeError('pretrained must be a str or None')", "def load(self):\r\n checkpoint = torch.load(self.checkpoint_path,\r\n map_location=self.device)\r\n self.load_state_dict(checkpoint)\r\n del checkpoint", "def load_checkpoint(checkpoint_file: pl.Path) -> Optional[Dict[str, Any]]:\n if checkpoint_file.exists():\n logger.info(f\"Loading checkpoint {checkpoint_file}.\")\n checkpoint = torch.load(str(checkpoint_file))\n logger.info(f\"Done loading checkpoint from epoch {checkpoint['epoch']}.\")\n else:\n logger.warning(f\"No {checkpoint_file} checkpoint file found. 
Starting normal.\")\n return checkpoint", "def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def load_weights(self, weights):\n\n i = 0\n for l in range(1, self.num_layers()):\n for n in range(self.get_layer(l).num_nodes):\n for w in range(len(self.get_node_with_layer(l, n).weights)):\n self.get_node_with_layer(l, n).weights[w] = weights[i]\n i += 1", "def restore(self, weights_file):\r\n\r\n self.model.load_weights(weights_file, by_name=True)", "def load_nn(self, filename):\n self.weights_and_biases = (np.load(filename, allow_pickle=True)).tolist()\n print('Weights and biases are loaded')", "def load_model(self, checkpoint_path):\n model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def load_weights(self):\n try:\n print('loading weights from {}'.format(self.cfg.class_model_dir))\n self.load_state_dict(torch.load(self.cfg.class_model_dir + self.class_model_name + '.pth'))\n except Exception as e:\n print(\"load weights exception: {}\".format(e))", "def load_model_states_from_checkpoint(model, filename, tag, from_gpu=True):\r\n assert os.path.exists(filename)\r\n if from_gpu:\r\n checkpoint = torch.load(filename)\r\n else:\r\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\r\n model.load_state_dict(checkpoint[tag])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load_model(self, file_name):\n\t\tself.model.load_weights(file_name)", "def init_weights(self, pretrained=None, strict=True):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=strict, logger=logger)\n elif pretrained is not None:\n raise TypeError(f'\"pretrained\" must be a str or None. 
But received {type(pretrained)}.')", "def load_weights(self, file_path, format=None, in_order=True, skip=False):\n _load_weights(self, file_path, format, in_order, skip)", "def load_checkpoint_ram(self, checkpoint, train=True):\n # -- For all tasks, create a corresponding head, otherwise the restoring would not work due to mismatching weights -- #\n self.mh_network.add_n_tasks_and_activate(self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'],\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'])\n \n # -- Set the network to the full MultiHead_Module network to restore everything -- #\n self.network = self.mh_network\n \n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().load_checkpoint_ram(checkpoint, train)\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model", "def resnet_init_from_checkpoint_fn(checkpoint):\n logging.info('Initializing model weights from %s', checkpoint)\n assignment_map = {}\n resnet_scope = _get_resnet_scope()\n for var in contrib_framework.get_variables(\n scope=resnet_scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):\n if 'dense' not in var.op.name:\n # Remove the parent scope prefix.\n name_in_ckpt = var.op.name.replace(resnet_scope, 'resnet_model/')\n assignment_map[name_in_ckpt] = var\n tf.train.init_from_checkpoint(checkpoint, assignment_map)" ]
[ "0.76077384", "0.74392045", "0.74392045", "0.7414661", "0.7232018", "0.71317524", "0.7071087", "0.70339084", "0.700918", "0.6977498", "0.6935631", "0.691333", "0.68927085", "0.68749905", "0.68339276", "0.6816416", "0.6816042", "0.67910594", "0.6769799", "0.6769098", "0.6727852", "0.6713089", "0.6706459", "0.6683782", "0.6658891", "0.66465485", "0.6617412", "0.6616446", "0.6595445", "0.6595445", "0.65860444", "0.6582845", "0.65760124", "0.6567407", "0.6567407", "0.655", "0.654494", "0.654308", "0.65425074", "0.65225554", "0.6519562", "0.6502056", "0.64976", "0.6494225", "0.648421", "0.6479", "0.6476018", "0.64715683", "0.64661586", "0.64661586", "0.64654607", "0.64533824", "0.6452823", "0.64477986", "0.64477986", "0.64337814", "0.64327794", "0.6423933", "0.6423651", "0.6417821", "0.63964593", "0.63902766", "0.6384771", "0.63653183", "0.63635445", "0.635375", "0.63508856", "0.6348675", "0.6344284", "0.6343296", "0.6342838", "0.63309", "0.6330147", "0.6329762", "0.63064724", "0.6300289", "0.62704086", "0.62672496", "0.62668973", "0.6265366", "0.62633765", "0.6259392", "0.6257295", "0.6255254", "0.62301844", "0.6229952", "0.6226355", "0.62140304", "0.62128186", "0.6211067", "0.61976856", "0.61976856", "0.61963856", "0.6194719", "0.6172211", "0.61630845", "0.61520475", "0.61514324" ]
0.77680707
0
Lists the model's parameters.
def get_params(self): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def getListOfParameters(self, *args):\n return _libsbml.Model_getListOfParameters(self, *args)", "def parameters(self):\n return self.model.parameters()", "def parameters(self):\n return []", "def print_model_params(model):\n for param, value in zip(model.param_names, model.parameters):\n print(\"{:0.4f}\\t{}\".format(value, param))", "def print_params(self):\n print(self._list_params())", "def _get_parameters(self) -> list:\n return self.parameters", "def get_params(self):\n return []", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def get_params(self):\n return list(self.params.values())", "def generate_parameter_list(self) -> None:\n\n # simulation parameters from model\n model_parameter_ids = np.array(self.amici_model.getParameterIds())\n write_string_array(self.f, \"/parameters/modelParameterNames\",\n model_parameter_ids)\n print(Fore.CYAN + \"Number of model parameters:\",\n len(model_parameter_ids))\n\n print(Fore.CYAN + \"Number of optimization parameters:\",\n len(self.parameter_df))\n write_string_array(self.f, \"/parameters/parameterNames\",\n self.parameter_df.index.values[\n (self.parameter_df.estimate == 1)\n & ~self.parameter_df.index.isin(\n self.amici_model.getFixedParameterIds())])\n\n self.generate_simulation_to_optimization_parameter_mapping()\n\n self.f.flush()", "def get_params_list():\n return common.QOL_PARAMS", "def get_resource_params():\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def model_parameters(self) -> Iterator[Tuple[str, torch.Tensor]]:\n return self._model.named_parameters()", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def param(self):\n return []", "def param(self):\n return []", "def print_param_names(model):\n for (param_name, param) in model.get_parameters().items():\n print(param_name, param.shape)", "def parameter_lists_for_model(self, model: AbstractPriorModel) -> List[float]:\n if self.is_path_kwargs:\n paths = model.all_paths\n else:\n paths = model.all_names\n\n return self.parameter_lists_for_paths(paths)", "def parameters(self):\n return self._params", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def get_model_params(self):\n return self._model_params", "def parameters(self) -> List[Parameter]:\n return self._parameters", "def param(self):\r\n\r\n return []", "def get_params(self):\n pass", "def parameters(self):\n return [term.parameter for term in self.terms]", "def parameters(self):\n return self.pars", "def parameters(self):\n return self._params", "def param(self):\r\n return []", "def help(cls):\n print(cls._LIST_PARAMETERS)", "def get_params(self):", "def parameters(self):\n return self.vars", "def get_params(self):\n raise NotImplementedError", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def list_param(command):\n namespace = app.main(command)\n assert namespace.command == 'lp' or namespace.command == \"listparam\"", "def get_params (self):\n return self.params", "def 
get_params (self):\n return self.params", "def parameters(self):\n return [o.parameters for o in self.obs]", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def show_params(self):\n \n return self.params[self.profile]", "def params(self):\n\t\treturn self.params_", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def parameters(self):\n pass", "def params(self):\n return self._pars", "def params(self):\n return self._params", "def params(self):\n return self._params", "def params(self):\n return self._params", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def params(self) -> List[ParamSpec]:\n return self._params", "def parameters(self):\n\n return self._parameters", "def get_params(self) -> torch.Tensor:\n params = []\n for pp in list(self.net.parameters()):\n params.append(pp.view(-1))\n return torch.cat(params)", "def get_params(self):\n\n return self.params_", "def params(self):\n params = []\n\n for item in self._definition.get('params', []):\n params.append(Parameter(**item))\n\n return params", "def get_model_params(self):\n\n results = self._model.fit()\n model_params = np.expand_dims(results.params.as_matrix(), 1)\n return model_params", "def generative_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in dir(self.generative_model):\n params.extend(list(self.generative_model.parameters()))\n params.extend(list(self.latent.generative_parameters()))\n return params", "def _list_params(self, the_list: List):\n return [p for e in the_list for p in self._params(e)]", "def printParameters(self):\n print(\"----------Model Parameters----------\")\n print(\"Initial Conv. 
Depth : \" + str(self.conv_depth))\n print(\"Number of Classes : \" + str(self.n_classes))\n print(\"Dropout : \" + str(self.dropout))\n print(\"Activation Function : Relu\")\n print(\"Input Shape : \" + str(self.input_shape))\n print(\"Batch Size : \" + str(self.batch_size))\n print(\"--------Optimizer Parameters--------\")\n print(\"Learning Rate : \" + str(self.optimizer.lr))\n print(\"Momentum : \" + str(self.optimizer.momentum))\n print(\"Initial Decay : \" + str(self.optimizer.initial_decay))", "def get(self):\n return self.params", "def print_params(self):\n s = self._list_params()+\"\\n\"\n if 'scale_params' in self.__dict__.keys():\n s += self.scale_params._list_params()+\"\\n\"\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n s += self.atmospheric_params._list_params()+\"\\n\"\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n s += self.atemperature_params._list_params()+\"\\n\"\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n s += self.oceanic_params._list_params()+\"\\n\"\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n s += self.ground_params._list_params()+\"\\n\"\n\n if 'gotemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n s += self.gotemperature_params._list_params() + \"\\n\"\n\n print(\"Qgs v0.2.8 parameters summary\")\n print(\"=============================\\n\")\n print(s)", "def get_params_iter(self):\n return []", "def get_embed_params(model) -> List:\r\n return [param for name, param in model.named_parameters() if \"embed\" in name]", "def parameters(self):\n return self.trainer_parameters", "def parameters(self):\n return self.trainer_parameters", "def prms(widget: QWidget) -> List:\n parameters = BaseTrain.prms(widget)\n return parameters", "def parameters(self):", "def params():\n raise NotImplementedError", "def get_param_names(self):\n return list(self.params.keys())", "def param_values(self):\n return self._param_values", "def get_paramnames_list(self):\n # TODO include syselem?\n\n query = \"SELECT NAME FROM %s\" % self.__schema\n with self.__connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n return [val['NAME'] for val in result]", "def get(self, *args):\n return _libsbml.ListOfParameters_get(self, *args)", "def params(self) -> Munch:\n return self._params", "def get_parameters(self):\n return self.context.params", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def parameters_names(cls):\n return cls._Parameters._fields", "def get_params(self):\n params = []\n params.append(('rows', str(self._rows)))\n if self._page > 1:\n params.append(('start', str((self._page - 1) * self._rows)))\n\n return params", "def parameters(self):\n return self._default_params", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def parameter_names(self) -> List[str]:", "def get_parList(self):\n parList = []\n for modelName in self._modelList:\n model = self.__modelDict[modelName]\n modelParDict = model.parFitDict\n for parName in modelParDict.keys():\n parList.append(modelParDict[parName][\"value\"])\n return parList" ]
[ "0.80538356", "0.75631976", "0.72960615", "0.7105871", "0.7048238", "0.6971301", "0.6963369", "0.6942364", "0.69122803", "0.6896806", "0.6824722", "0.6821565", "0.676841", "0.67298985", "0.67298985", "0.67248833", "0.6719116", "0.6717513", "0.6717513", "0.67164814", "0.6712457", "0.6694902", "0.66933364", "0.6691488", "0.66579175", "0.665548", "0.6649228", "0.66487366", "0.66479886", "0.66403294", "0.66145444", "0.6569835", "0.6568972", "0.65615726", "0.65540904", "0.6544046", "0.6527509", "0.6527509", "0.6527509", "0.6526537", "0.65207803", "0.65207803", "0.6512", "0.6503212", "0.6503212", "0.6503212", "0.6503212", "0.6503212", "0.6486162", "0.64849406", "0.64796937", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6477032", "0.6457764", "0.64506465", "0.64479744", "0.6446706", "0.6440776", "0.6440776", "0.6440776", "0.6433494", "0.63789225", "0.6367834", "0.6361364", "0.632063", "0.6302258", "0.6289967", "0.6287379", "0.62705636", "0.6267131", "0.62598896", "0.62573093", "0.6242155", "0.6225191", "0.62155", "0.62155", "0.6212865", "0.62126464", "0.6211837", "0.61864233", "0.6184046", "0.6182091", "0.6181579", "0.61734796", "0.61494285", "0.61487633", "0.61447823", "0.61333835", "0.61279726", "0.6113043", "0.6107981", "0.61042255" ]
0.61706924
92
Exposes all the layers of the model.
def fprop(self, x): if x is self.x: return self.end_points else: with slim.arg_scope(arg_scopes_map['mobilenet_v2']()): net, end_points = networks_map['mobilenet_v2']( x, num_classes=self.num_classes, is_training=False, reuse=tf.AUTO_REUSE) return _get_updated_endpoints(end_points, 'Logits')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def layers(self): # -> LayerView:\n ...", "def build_layers(self):\n raise NotImplementedError", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def _get_layers(self) :\n \n return self._layers", "def add_bluprint_layers(self, models_dict, graph, class_names, dimensions,\n show_activation, show_constant_input):\n top_model = models_dict['top-model']\n # Get the top-model color list containing\n # the base color and the layer shades\n top_model_color = top_model[1]\n # get the model\n top_model = top_model[0]\n\n # Get the layers of the model\n layers = top_model[\"config\"][\"layers\"]\n # Loop through the layers\n for layer in layers:\n # If the layer is not a model\n if layer[\"class_name\"] != \"Model\":\n # Get the layer name\n layer_name = layer[\"name\"]\n # If label only layer's class name\n if class_names:\n # Get the layer's information\n layer_info = self.viz_utils.query_layer(layer_name,\n models_dict)\n # Get the layer's class name\n layer_class = layer_info['class_name']\n # If the layer is a a constant input layer,\n # manually specify the class name\n if layer_name.find('constant_input') != -1:\n layer_class = 'Constant Input'\n # Depending on the class name\n # find the the layer shade\n # If the layer is a constant_input layer\n # the color is black\n model_color = top_model_color[1].get(layer_class, \"black\")\n else:\n # If don't use class names for layers\n # then use the layer name from the JSON\n layer_class = layer_name\n model_color = top_model_color[0]\n\n\n # Add the node to the graph\n graph = self.viz_utils.add_nodes(layer_name, graph,\n layer_class, model_color,\n dimensions, show_constant_input)\n\n # Add Blueprint Inbound Edges\n graph = self.connect_blueprint_inbounds(models_dict,\n layer, graph,\n class_names, dimensions,\n show_activation, show_constant_input)\n else:\n # Add Softmod\n graph = self.connect_softmod_in_blueprint(models_dict,\n layer, graph, class_names,\n dimensions, show_activation, show_constant_input)\n\n return graph", "def get_all_layers(model):\n layers = []\n for l in model.layers:\n if hasattr(l, 'layers'):\n layers += get_all_layers(l)\n else:\n layers.append(l)\n return layers", "def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. 
call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return", "def layers(self):\n return self['layers']", "def layers(self) ->Optional[nn.ModuleList]:\n return self._layers", "def define_layers(self):\n\n if self.D0Flag:\n self.d = self.h\n\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n for l in range(1, self.L):\n self.layers.append(nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )) # nn.BatchNorm1d(self.h, affine=False)))\n\n self.layers.append(nn.Linear(self.h, 1))", "def layers(self, x):\n raise NotImplementedError", "def build(self):\n\n layers = GiraffeLayer.get_all_structural()\n \n for layer in layers:\n\n self.add_objects_from_layer(layer)\n\n return self", "def UpdateLayers(self):\n pass", "def run(layers):", "def getLayers(self):\n return self.__layers", "def make_feature_layers(self, config):\n raise NotImplementedError", "def layers(self, layers):\n\n self._layers = layers", "def layers(self):\r\n return self._flc.layers", "def print_layer_trainable(model_name):\n\n print('trainable : layer name')\n print('- '*30)\n for layer in model_name.layers:\n # if layer.trainable:\n print(\"{0}:\\t{1}\".format(layer.trainable, layer.name))\n \n return", "def print_layers(model):\r\n for i in range(len(model.layers)):\r\n print(\"Printing layer shape: %d\" % i, model.layers[i])\r\n weights = model.layers[i].get_weights()\r\n for weight in weights: # Layer type\r\n print(weight.shape)", "def get_trainable_layers(self):\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers", "def define_layers(self):\n if self.d != 0:\n # If we have a fixed input size we use it do define the first layer\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n else:\n self.layers = [nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )]\n\n l = 0\n for l in self.layers_sizes():\n self.layers.append(nn.Sequential(nn.Linear(self.h - l, self.h - l - self.delta_h),\n nn.ReLU(), )) # nn.BatchNorm1d( self.h - l - self.delta_h, affine=False)))\n self.layers.append(nn.Sequential(nn.Linear(self.h - l - self.delta_h, 1), nn.ReLU()))", "def __repr__(self):\n return misc.describe_layer(self, name=\"model\")", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def compile(self):\n for layer in self.layers:\n layer._Dense__load()", "def _init_layers(self) -> None:\n self.convs_all_levels = nn.ModuleList()\n for i in range(self.start_level, self.end_level + 1):\n convs_per_level = nn.Sequential()\n convs_per_level.add_module(\n f'conv{i}',\n ConvModule(\n self.in_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n inplace=False,\n bias=False))\n self.convs_all_levels.append(convs_per_level)\n\n conv_branch = []\n for _ in range(self.num_stacked_convs):\n conv_branch.append(\n ConvModule(\n self.feat_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=False))\n self.conv_branch = nn.Sequential(*conv_branch)\n\n self.conv_pred = nn.Conv2d(\n self.feat_channels, self.out_channels, 1, stride=1)", "def 
add_layers(self, layers):\n\n existing_layers = self.layers\n assert len(existing_layers) > 0\n for layer in layers:\n assert layer.get_mlp() is None\n layer.set_mlp(self)\n layer.set_input_space(existing_layers[-1].get_output_space())\n existing_layers.append(layer)\n assert layer.layer_name not in self.layer_names\n self.layer_names.add(layer.layer_name)", "def inception_model(layer_names):\n \n### START CODE HERE ###\n # Load InceptionV3 with the imagenet weights and **without** the fully-connected layer at the top of the network\n inception = tf.keras.applications.inception_v3.InceptionV3(include_top=False,weights='imagenet')\n\n # Freeze the weights of the model's layers (make them not trainable)\n inception.trainable = False\n \n # Create a list of layer objects that are specified by layer_names\n output_layers = [inception.get_layer(name).output for name in layer_names]\n\n # Create the model that outputs the content and style layers\n model = tf.keras.Model(inputs=inception.input, outputs=output_layers)\n \n # return the model\n return model", "def get_layers(model):\n layers = []\n for child in model.children():\n layer_name = child.__class__.__name__\n if layer_name in CONV_OPS:\n layers.append(Layer.from_conv(child))\n elif layer_name in UP_OPS:\n layers.append(Layer.from_up(child))\n else:\n layers.extend(get_layers(child))\n return layers", "def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)", "def __init__(self, layers):\n\n\t\tself.layers = layers", "def get_all(self):\n\n layer_names = rs.LayerNames()\n\n layers = []\n\n for layer_name in layer_names:\n\n layer = GiraffeLayer(layer_name)\n \n layers.append(layer)\n\n return layers", "def GetLayers(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_GetLayers(self, *args)", "def model_number_layers(model):\n for idx, layer in enumerate(model.layers):\n print(idx, layer.name)", "def layers(self, layers):\n self._layers = layers\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.retina_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n 3,\n padding=1)\n self.retina_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4, 3, padding=1)", "def setup_layer_structure(self):\n self.page_rank_convolution_1 = self.layer(self.feature_number, self.args.layers[0], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_2 = self.layer(self.args.layers[0], self.args.layers[1], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_3 = self.layer(self.args.layers[1], self.class_number, self.args.iterations, self.args.alpha)", "def loadLayers(self,\n layers: List[keras.layers.Layer]) -> None:\n self.__layers = layers\n\n # Model has been reset, redraw view\n self.modelReset.emit()\n return", "def update_layers(self):\n\n # Para cada layer 
atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def _init_layers(self) -> None:\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])", "def addExportLayerToCoreml(builder):\n outputNames = [output.name for output in builder.spec.description.output]\n\n for i, outputName in enumerate(outputNames):\n # formulas: https://github.com/ultralytics/yolov5/issues/471\n builder.add_activation(\n name=f\"sigmoid_{outputName}\",\n non_linearity=\"SIGMOID\",\n input_name=outputName,\n output_name=f\"{outputName}_sigmoid\",\n )\n\n ### Coordinates calculation ###\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2) -> nC = 640 / strides[i]\n builder.add_slice(\n name=f\"slice_coordinates_xy_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_xy\",\n axis=\"width\",\n start_index=0,\n end_index=2,\n )\n # x,y * 2\n builder.add_elementwise(\n name=f\"multiply_xy_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_xy\"],\n output_name=f\"{outputName}_multiplied_xy_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # x,y * 2 - 0.5\n builder.add_elementwise(\n name=f\"subtract_0_5_from_xy_{outputName}\",\n input_names=[f\"{outputName}_multiplied_xy_by_two\"],\n output_name=f\"{outputName}_subtracted_0_5_from_xy\",\n mode=\"ADD\",\n alpha=-0.5,\n )\n grid = make_grid(featureMapDimensions[i], featureMapDimensions[i]).numpy()\n # x,y * 2 - 0.5 + grid[i]\n builder.add_bias(\n name=f\"add_grid_from_xy_{outputName}\",\n input_name=f\"{outputName}_subtracted_0_5_from_xy\",\n output_name=f\"{outputName}_added_grid_xy\",\n b=grid,\n shape_bias=grid.shape,\n )\n # (x,y * 2 - 0.5 + grid[i]) * stride[i]\n builder.add_elementwise(\n name=f\"multiply_xy_by_stride_{outputName}\",\n input_names=[f\"{outputName}_added_grid_xy\"],\n output_name=f\"{outputName}_calculated_xy\",\n mode=\"MULTIPLY\",\n alpha=strides[i],\n )\n\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2)\n builder.add_slice(\n name=f\"slice_coordinates_wh_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_wh\",\n axis=\"width\",\n start_index=2,\n end_index=4,\n )\n # w,h * 2\n builder.add_elementwise(\n name=f\"multiply_wh_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_wh\"],\n output_name=f\"{outputName}_multiplied_wh_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # (w,h * 2) ** 2\n builder.add_unary(\n name=f\"power_wh_{outputName}\",\n 
input_name=f\"{outputName}_multiplied_wh_by_two\",\n output_name=f\"{outputName}_power_wh\",\n mode=\"power\",\n alpha=2,\n )\n # (w,h * 2) ** 2 * anchor_grid[i]\n anchor = (\n anchorGrid[i]\n .expand(-1, featureMapDimensions[i], featureMapDimensions[i], -1)\n .numpy()\n )\n builder.add_load_constant_nd(\n name=f\"anchors_{outputName}\",\n output_name=f\"{outputName}_anchors\",\n constant_value=anchor,\n shape=anchor.shape,\n )\n builder.add_elementwise(\n name=f\"multiply_wh_with_achors_{outputName}\",\n input_names=[f\"{outputName}_power_wh\", f\"{outputName}_anchors\"],\n output_name=f\"{outputName}_calculated_wh\",\n mode=\"MULTIPLY\",\n )\n\n builder.add_concat_nd(\n name=f\"concat_coordinates_{outputName}\",\n input_names=[f\"{outputName}_calculated_xy\", f\"{outputName}_calculated_wh\"],\n output_name=f\"{outputName}_raw_coordinates\",\n axis=-1,\n )\n builder.add_scale(\n name=f\"normalize_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_coordinates\",\n output_name=f\"{outputName}_raw_normalized_coordinates\",\n W=torch.tensor([1 / 640]).numpy(),\n b=0,\n has_bias=False,\n )\n\n ### Confidence calculation ###\n builder.add_slice(\n name=f\"slice_object_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_object_confidence\",\n axis=\"width\",\n start_index=4,\n end_index=5,\n )\n builder.add_slice(\n name=f\"slice_label_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_label_confidence\",\n axis=\"width\",\n start_index=5,\n end_index=0,\n )\n # confidence = object_confidence * label_confidence\n builder.add_multiply_broadcastable(\n name=f\"multiply_object_label_confidence_{outputName}\",\n input_names=[\n f\"{outputName}_label_confidence\",\n f\"{outputName}_object_confidence\",\n ],\n output_name=f\"{outputName}_raw_confidence\",\n )\n\n # input: (1, 3, nC, nC, 85), output: (3 * nc^2, 85)\n builder.add_flatten_to_2d(\n name=f\"flatten_confidence_{outputName}\",\n input_name=f\"{outputName}_raw_confidence\",\n output_name=f\"{outputName}_flatten_raw_confidence\",\n axis=-1,\n )\n builder.add_flatten_to_2d(\n name=f\"flatten_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_normalized_coordinates\",\n output_name=f\"{outputName}_flatten_raw_coordinates\",\n axis=-1,\n )\n\n builder.add_concat_nd(\n name=\"concat_confidence\",\n input_names=[\n f\"{outputName}_flatten_raw_confidence\" for outputName in outputNames\n ],\n output_name=\"raw_confidence\",\n axis=-2,\n )\n builder.add_concat_nd(\n name=\"concat_coordinates\",\n input_names=[\n f\"{outputName}_flatten_raw_coordinates\" for outputName in outputNames\n ],\n output_name=\"raw_coordinates\",\n axis=-2,\n )\n\n builder.set_output(\n output_names=[\"raw_confidence\", \"raw_coordinates\"],\n output_dims=[(25200, numberOfClassLabels), (25200, 4)],\n )", "def predict_visualize_layers(self, X):\n\n if isinstance(X, np.ndarray):\n X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1))\n elif isinstance(X, tf.data.Dataset):\n X = X.map(self._reshape)\n\n for x, y in X:\n x = tf.expand_dims(x, 0)\n\n plt.title(\"Test Sample Input\")\n plt.grid(False)\n plt.imshow(x[0, :, :, 0], aspect='auto', cmap='plasma', origin='lower')\n plt.colorbar()\n plt.show()\n\n layer_outputs = [layer.output for layer in self.model.layers]\n visualisation_model = tf.keras.models.Model(inputs=self.model.input, outputs=layer_outputs)\n\n visualisations = visualisation_model.predict(x)\n\n images_per_row = 4\n\n for layer_name, 
layer_activation in zip(map(lambda x : x.name, layer_outputs[:3]), visualisations[:3]):\n n_features = layer_activation.shape[-1]\n size = layer_activation.shape[1:3]\n n_cols = n_features // images_per_row\n grid = np.zeros((size[0] * n_cols, images_per_row * size[1]))\n\n for col in range(n_cols):\n for row in range(images_per_row):\n channel_image = layer_activation[0, :, :, col * images_per_row + row]\n channel_image -= channel_image.mean()\n channel_image /= channel_image.std()\n channel_image *= 64\n channel_image += 128\n channel_image = np.clip(channel_image, 0, 255).astype('uint8')\n grid[col * size[0]: (col + 1) * size[0], row * size[1]: (row + 1) * size[1]] = channel_image\n\n plt.figure(figsize=(1. / size[0] * grid.shape[1], 3. / size[1] * grid.shape[0]))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(grid, aspect='auto', cmap='plasma', origin='lower')\n plt.colorbar()\n plt.show()\n\n pred = np.argmax(visualisations[-1])\n print(f\"Predicted class: {Genre(pred)} with probability {visualisations[-1][0][pred]}\\n\"\n + f\"Actual class: {Genre(y)}\")", "def vgg_layers(layer_names):\n # Load our model. Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n vgg.trainable = False\n \n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model", "def transformer_layers(self):\n return self._transformer_layers", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)", "def hidden_layers(self):\n\t\tif self.hidden is None:\n\t\t\tself.hidden, self.inputs, self.weights_all, self.biases_all = [], [], [], []\n\t\t\tlast_hidden = self.x\n\t\t\tif self.covnet == 1:\n\t\t\t\ty_conv, self._drouput, self.hidden, self.inputs = deepnn(self.x)\n\t\t\telif self.covnet == 2:\n\t\t\t\ty_c, self.hidden, self.inputs = multi_layer_perceptron(self.x, self.input_size, self.num_of_classes,\n\t\t\t\t self.layerSize[0], self.layerSize[1])\n\t\t\telse:\n\n\t\t\t\tself._drouput = 'dr'\n\t\t\t\t# self.hidden.append(self.x)\n\t\t\t\tfor i in range(1, len(self.all_layer_sizes)):\n\t\t\t\t\tname_scope = 'hidden' + str(i - 1)\n\t\t\t\t\trow_size, col_size = self.all_layer_sizes[i - 1], self.all_layer_sizes[i]\n\t\t\t\t\tactivation_function = self.activation_function\n\t\t\t\t\tlast_hidden = self.initilizae_layer(name_scope, row_size, col_size, activation_function,\n\t\t\t\t\t last_hidden)\n\t\t\t\tname_scope = 'final_layer'\n\t\t\t\trow_size, col_size = self.layerSize[-1], self.num_of_classes\n\t\t\t\tactivation_function = None\n\t\t\t\tlast_hidden = self.initilizae_layer(name_scope, row_size, col_size, activation_function, last_hidden)\n\t\treturn self.hidden", "def convert_layers(model):\n\n import logging\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n for name, module in model._modules.items():\n if len(list(module.children())) > 0:\n model._modules[name] = convert_layers(model=module)\n try:\n module_str = str(module)\n module_new = eval(module_str)\n try:\n module_new.weight = module.weight\n module_new.bias = module.bias\n except:\n pass\n model._modules[name] = module_new\n logger.info(\"Quantizing \" + str(name) + \" \" + 
str(module))\n except:\n pass\n return model", "def keras_add_layers(model, num_classes, keep_prob):\n # DONE: Implement function\n\n # See also lesson \"FCN-8 Decoder\" for structure, and Long_Shelhamer paper\n\n # Walkthrough video started with 1x1 convolution like this, but notes explained\n # that was already done for us (loaded model is not ordinary VGG but already\n # adapted for FCN). In fact the VGG network provided looks very much like\n # the one generated by the Single-Shot Detector caffe code, so I guess they\n # share some common heritage.\n #conv_1x1 = tf.layers.conv2d(vgg_layer7_out, # at/near end of VGG\n # num_classes, # just road/nonroad for us\n # 1, # as 1x1 conv\n # padding='same',\n # kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3))\n\n # Using Tensorboard to visualise the structure of the Udacity VGG model provided, and\n # tf.trainable_variables() to list the dimensions and sizes of the weights and biases\n # for each layer, I arrive at this summary of what shape the output of each layer\n # is (knowing that we started with a 160 height x 576 width x 3 colour channel image).\n # All of the convolution layers have SAME padding and [1,1,1,1] strides so they\n # don't reduce the x-y pixel size. All the pooling layers have [1,2,2,1] strides so\n # they halve the pixel size. I'm ignoring the first dimension (across images), as\n # everything works on one image at a time.\n #\n # Layer name Details Output dimensions\n # <input> raw image 160x576x3\n # conv1_1 conv2d 3x3x3x64, Relu 160x576x64\n # conv1_2 conv2d 3x3x64x64, Relu 160x576x64\n # pool1 pool [1,2,2,1] 80x288x64\n # conv2_1 conv2d 3x3x64x128, Relu 80x288x128\n # conv2_2 conv2d 3x3x128x128, Relu 80x288x128\n # pool2 pool [1,2,2,1] 40x144x128\n # conv3_1 conv2d 3x3x128x256, Relu 40x144x256\n # conv3_2 conv2d 3x3x256x256, Relu 40x144x256\n # conv3_3 conv2d 3x3x256x256, Relu 40x144x256\n # pool3 pool [1,2,2,1] 20x72x256 --> layer3_out\n # conv4_1 conv2d 3x3x256x512, Relu 20x72x512\n # conv4_2 conv2d 3x3x512x512, Relu 20x72x512\n # conv4_3 conv2d 3x3x512x512, Relu 20x72x512\n # pool4 pool [1,2,2,1] 10x36x512 --> layer4_out\n # conv5_1 conv2d 3x3x512x512, Relu 10x36x512\n # conv5_2 conv2d 3x3x512x512, Relu 10x36x512\n # conv5_3 conv2d 3x3x512x512, Relu 10x36x512\n # pool5 pool [1,2,2,1] 5x18x512\n # fc6 conv2d 7x7x512x4096, Relu 5x18x4096\n # dropout dropout(keep_prob) 5x18x4096\n # fc7 conv2d 1x1x4096x4096, Relu 5x18x4096\n # dropout_1 dropout(keep_prob) 5x18x4096 --> layer7_out\n # layer8 conv2d_t 10x36\n\n layer3_out = model.get_layer('block3_pool').output\n layer4_out = model.get_layer('block4_pool').output\n\n # Problem here: TF2 library model doesn't have image-shaped layers 6 & 7 like\n # model provided originally with TF1, but instead is flattened amporphous classifier.\n # So we're working with more 'raw' layer as input. 
TODO should add back\n # two conv2d layers before this to be like the original\n drop_prob = 1.0 - keep_prob\n\n layer5 = model.get_layer('block5_pool')\n\n layer6_conv = tf.keras.layers.Conv2D(4096,\n 7, # 7x7 patch from original Udacity model\n strides=(1,1),\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)), # guess same as others\n name='layer6_conv')\n\n layer6_dropout = tf.keras.layers.Dropout(drop_prob, name=\"layer6_dropout\")\n\n layer7_conv = tf.keras.layers.Conv2D(4096,\n 1, # 1x1 patch from original Udacity model\n strides=(1,1),\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)), # guess\n name='layer7_conv')\n\n layer7_dropout = tf.keras.layers.Dropout(drop_prob, name=\"layer7_dropout\")\n\n # Connect up the new layers\n x = layer6_conv(layer5.output)\n x = layer6_dropout(x)\n x = layer7_conv(x)\n layer7 = layer7_dropout(x)\n\n # Create a new model\n mod_model = tf.keras.Model(inputs=model.input, outputs=layer7)\n\n # We should now have the same structure as the original Udacity version of VGG16,\n # but still need to add the decoder and skip connections as before\n\n # Upsample by 2. We need to work our way down from a kernel depth of 4096\n # to just our number of classes (i.e. 2). Should we do this all in one go?\n # Or keep more depth in as we work upwards? For now doing it all in one hit.\n layer8 = tf.keras.layers.Conv2DTranspose(num_classes, #filters, \n 4, # kernel size taken from classroom example, might experiment\n strides=2, # stride causes upsampling\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer8')\n\n # Now we're at 10x36x2 so we have same pixel resolution as layer4_out. Can't directly add\n # in layer4_out because it has filter depth of 512. (Though we could have had our transpose\n # convolution only downsample to 512 for compatibility... might try that later)\n\n # Squash layer4 output with 1x1 convolution so that it has compatible filter depth (i.e. 
num_classes)\n layer4_squashed = tf.keras.layers.Conv2D(num_classes, # new number of filters,\n 1, # 1x1 convolution so kernel size 1\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer4_squashed')\n # upsample by 2\n layer9 = tf.keras.layers.Conv2DTranspose(num_classes, # filters\n 4, # kernel size taken from classroom example\n strides=(2,2), # stride causes upsampling\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer9')\n\n # Now we're at 20x72x2 so same pixel resolution as layer3_out, but need to squash that from\n # 256 filters to 2 (num_classes) before we can add it in as skip connection\n layer3_squashed = tf.keras.layers.Conv2D(num_classes, # new number of filters\n 1, # 1x1 convolution so kernel size 1\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer3_squashed')\n\n # upsample by 8 to get back to original image size\n layer10 = tf.keras.layers.Conv2DTranspose(num_classes,\n 32, # Finding quite large kernel works nicely\n strides=(8,8), # stride causes upsampling\n padding='same',\n kernel_regularizer = tf.keras.regularizers.l2(0.5 * (1e-3)),\n name='layer10')\n\n # so now we should be at 160x576x2, same as original image size, 2 classes\n\n # Connect the layers\n x1 = layer8(layer7)\n x2 = layer4_squashed(layer4_out)\n\n # now we can add skip layer of this dimension taken from corresponding encoder layer\n layer8_plus_layer4 = tf.keras.layers.add([x1, x2], name='layer8_plus_layer4')\n #layer8_plus_layer4 = tf.add(layer8, layer4_squashed, name='layer8_plus_layer4')\n\n x1 = layer9(layer8_plus_layer4)\n x2 = layer3_squashed(layer3_out)\n\n # now we can add skip layer of this dimension taken from corresponding encoder layer\n layer9_plus_layer3 = tf.keras.layers.add([x1, x2], name='layer9_plus_layer3')\n #layer9_plus_layer3 = tf.add(layer9, layer3_squashed, name='layer9_plus_layer3')\n\n predictors = layer10(layer9_plus_layer3) # layer 10 should be same size as image\n\n # Create a new model\n mod_model = tf.keras.Model(inputs=model.input, outputs=predictors)\n print(\"Model after adding decoder layers:\")\n mod_model.summary()\n\n return mod_model", "def iteration_layers(model, speedup, session, indepth_layer=None):\n if speedup is True:\n layer_names_reduced = ['conv2d1',\n 'conv2d2',\n 'mixed3b',\n 'mixed4b',\n 'mixed5b']\n layer_tensors = [session.graph.get_tensor_by_name(name + \":0\") for name in layer_names_reduced]\n else:\n layer_tensors = model.layer_tensors\n\n return layer_tensors", "def layers(self, layers):\n self._layers = layers\n self.thetas = []\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer\n self.thetas.extend(layer.thetas())", "def viewOnFlatLayer(layer, dimensions, name = None):\n assert max(dimensions) > 1, \"At least one dimension needs to be larger than one.\"\n def slicer():\n nbunits = reduce(lambda x, y: x*y, dimensions, 1)\n insize = layer.indim // nbunits\n outsize = layer.outdim // nbunits\n for index in range(nbunits):\n yield ModuleSlice(layer, insize*index, insize*(index+1), outsize*index, outsize*(index+1))\n c = slicer()\n return ModuleMesh(lambda: next(c), dimensions, name)", "def num_layers(self): # -> int:\n ...", "def build_model(self):\n self.model = Sequential()\n # print self.layers[0].identifier\n # print self.layers[0].parameters\n for layer in self.layers:\n # print layer.identifier\n # print layer.parameters\n 
self.model.add(layer.toKerasFn())\n\n\n # super(SequentialModelWrapper, self).compile(optimizer=self.optimizer.toKerasFn(),\n # loss=self.loss,\n # metrics=self.metrics)\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics)", "def __call__(cls, *args, **kwargs):\n layer = super(LayerAspect, cls).__call__(*args, **kwargs)\n\n if Job.Current:\n Job.Current.addLayer(layer)\n \n layer.afterInit()\n return layer", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(2)\n ]\n self.norms = ModuleList(norms_list)", "def build_modules(self):\n self.backbone = Backbone(\n self.configs['backbone'],\n freeze_backbone=self.configs['freeze_backbone'],\n freeze_batchnorm=True\n )\n\n backbone_channel_sizes = get_backbone_channel_sizes(self.backbone)\n\n self.fpn = FeaturePyramidNetwork(\n backbone_channel_sizes=backbone_channel_sizes,\n min_feature_level=self.configs['min_feature_level'],\n max_feature_level=self.configs['max_feature_level'],\n feature_size=self.configs['pyramid_feature_size']\n )\n\n self.shared_conv_model = SharedConvModel(\n input_feature_size=self.configs['pyramid_feature_size'],\n feature_size=self.configs['shared_conv_feature_size'],\n num_layers=self.configs['shared_conv_num_layers']\n )\n\n if self.configs['shared_conv_num_layers'] > 0:\n shared_conv_output_size = self.configs['shared_conv_feature_size']\n else:\n shared_conv_output_size = self.configs['pyramid_feature_size']\n\n self.ofn = ObjectFinderNetwork(\n input_feature_size=shared_conv_output_size,\n feature_size=self.configs['finder_feature_size'],\n num_layers=self.configs['finder_num_layers']\n )\n\n self.ofn_loss_fn\n\n # self.classification_model = ClassificationModel()\n #\n # self.regression_model = RegressionModel()", "def __init__(self, model):\n self.output_weights = model.get_layer(\"output\").get_weights()[0]\n self.cam_model = Model(inputs=model.input, outputs=(model.get_layer(\"activation\").output, model.get_layer(\"output\").output))", "def layers(self):\n\n if not self.last_node:\n return []\n return nuke.layers(self.last_node)", "def summary(self):\n for i,layer in enumerate(self.chain):\n x = Input([2])\n y = layer.forward(x)\n Model(x,y,name=f'layer_{i}_summary').summary()", "def handle_layers(context, model, toplayer, layerids, materials, update, import_hidden=False):\n #setup main container to hold all layer collections\n layer_col_id=\"Layers\"\n if not layer_col_id in context.blend_data.collections:\n layer_col = context.blend_data.collections.new(name=layer_col_id)\n try:\n toplayer.children.link(layer_col)\n except Exception:\n pass\n else:\n #If \"Layers\" collection is in place, we assume the plugin had imported 3dm before\n layer_col = context.blend_data.collections[layer_col_id]\n\n # build lookup table for LayerTable index\n # from GUID, create collection for each\n # layer\n for lid, l in enumerate(model.Layers):\n if not l.Visible and not import_hidden:\n continue\n lcol = utils.get_iddata(context.blend_data.collections, l.Id, l.Name, None)\n layerids[str(l.Id)] = (lid, lcol)\n utils.tag_data(layerids[str(l.Id)][1], l.Id, l.Name)\n '''\n matname = l.Name + \"+\" + str(l.Id)\n if matname not in materials:\n laymat = utils.get_iddata(context.blend_data.materials, l.Id, l.Name, None)\n if update:\n\t laymat.use_nodes = True\n\t r, g, b, _ = 
l.Color\n\t principled = PrincipledBSDFWrapper(laymat, is_readonly=False)\n\t principled.base_color = (r/255.0, g/255.0, b/255.0)\n materials[matname] = laymat\n '''\n # second pass so we can link layers to each other\n for l in model.Layers:\n # link up layers to their parent layers\n if str(l.ParentLayerId) in layerids:\n parentlayer = layerids[str(l.ParentLayerId)][1]\n try:\n parentlayer.children.link(layerids[str(l.Id)][1])\n except Exception:\n pass\n # or to the top collection if no parent layer was found\n else:\n try:\n layer_col.children.link(layerids[str(l.Id)][1])\n except Exception:\n pass", "def mini_model(self):\n with tf.variable_scope(name_or_scope='human2d_network'):\n # down-sampling\n resi_0 = res_layer(self._input, filters=16, strides=2, kernel_size=7, training=self.training, name='resi_0')\n resi_1 = res_layer(resi_0, filters=32, strides=1, kernel_size=3, training=self.training, name='resi_1')\n pool_0 = max_pool_layer(resi_1, name='pool_0')\n resi_2 = res_layer(pool_0, filters=32, strides=1, kernel_size=3, training=self.training, name='resi_2')\n # hourglass module\n resi_3 = res_layer(resi_2, filters=64, strides=1, kernel_size=3, training=self.training, name='resi_3')\n hrgs_0 = hourglass_layer(resi_3, training=True, name='hrgs_0')\n # keypoint output\n keypoint_pre_0 = res_layer(hrgs_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_0')\n keypoint_pre_1 = res_layer(keypoint_pre_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_1')\n keypoint_pre_2 = res_layer(keypoint_pre_1, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_2')\n keypoint_output_raw = res_layer(keypoint_pre_2, filters=14, strides=1, kernel_size=1,\n training=self.training, bottleneck=False, name='keypoint_output_raw')\n keypoint_output = tf.nn.sigmoid(x=keypoint_output_raw, name='keypoint_output')\n # silhouette output\n silhouette_pre_0 = res_layer(hrgs_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_0')\n silhouette_pre_1 = res_layer(silhouette_pre_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_1')\n silhouette_pre_2 = res_layer(silhouette_pre_1, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_2')\n silhouette_output_raw = res_layer(silhouette_pre_2, filters=2, strides=1, kernel_size=1,\n training=self.training, bottleneck=False, name='silhouette_output_raw')\n silhouette_output = tf.nn.softmax(logits=silhouette_output_raw, name='silhouette_output')\n # return\n return None, None, keypoint_output, silhouette_output", "def get_all_structural(self):\n\n layer_names = rs.LayerNames()\n\n layers = []\n\n for layer_name in layer_names:\n\n layer = GiraffeLayer(layer_name)\n \n if layer.is_structural():\n\n layers.append(layer)\n\n # sort layers to make sure numbered nodes are added first and to maintain regular order\n layers.sort(key = lambda x: x.to_int())\n\n return layers", "def visualize_conv_layers(self, layer_name='conv1', savefig_path=\"\"):\n\n # The name of the layer we want to visualize\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == layer_name][0]\n\n # Visualize all filters in this layer.\n filters = np.arange(get_num_filters(self.model.layers[layer_idx]))\n\n # Generate input image for each filter. 
Here `text` field is used to overlay `filter_value` on top of the image.\n vis_images = []\n for idx in filters:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n\n # Generate stitched image palette with 8 cols.\n stitched = utils.stitch_images(vis_images, cols=8)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(layer_name)\n plt.savefig(savefig_path)\n\n print('debug')", "def vgg_layers(layer_names):\n # Load our model. Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n plot_model(vgg, 'vgg19_diagram.png')\n vgg.trainable = False\n\n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model", "def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()", "def print_layer_io_shapes(model):\n for i, _ in enumerate(model.layers):\n print(\"layer {} input: \".format(i), model.layers[i].input_shape)\n print(\"layer {} output:\".format(i), model.layers[i].output_shape)", "def getVisibilityLayers(self):\n return self._VisibilityLayers", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def visualize_model(self, ax):\n ax.imshow(self.w[1:].reshape(28, -1, order='F').T, cmap='bone')", "def __call__(self):\n custom_obj = {'tf': tf, 'relu6': tf.nn.relu6}\n wfile = self._get_model_weights()\n model = tf.keras.models.load_model(wfile, custom_objects=custom_obj)\n\n if not self._trainable:\n # freeze encoder layers up to\n # expanded_conv_16_project_BN\n for layer in model.layers[1:147]:\n layer.trainable = False\n\n return model", "def unfreeeze_all_layers(self):\n # Unfreeeze\n logger.info('MODEL: Unfreeze all layers.')\n for i in range(len(self.model.layers)):\n self.model.layers[i].trainable = True\n \n # Compile model\n logger.info('MODEL: Compiling...')\n self.model.compile(optimizer = Adam(lr=1e-4),\n loss={'yolo_loss': lambda y_true, y_pred: y_pred})", "def list_layers(service):\n r = _post(service)\n if 'layers' in r:\n return [layer(p) for p in r['layers']]\n return", "def _set_freeze_layers(self):\n for layer in self.encoder.layers[:self.freeze_layers]:\n layer.trainable = False", "def ApplyInputs(ss, en):\n ss.Net.InitExt()\n\n lays = [\"Input\", \"Output\"]\n for lnm in lays :\n ly = leabra.Layer(ss.Net.LayerByName(lnm))\n pats = en.State(ly.Nm)\n if pats != 0:\n ly.ApplyExt(pats)", "def filesystem_layers(self):\n pass", "def summary(self, verbose=False):\n for i, layer in enumerate(self._layers):\n print('%d: %s' % (i, str(layer)))\n if verbose:\n print('weights:', layer.get_weights())\n if layer._use_bias:\n print('bias:', layer._bias)\n print()", "def initialize_layers(self, layers_config: dict, inputs=None):\n layers_config = layers_config.copy()\n input_lyrs = []\n initiated_layers = OrderedDict()\n wrp_layer = None # indicator for wrapper layers\n first_layer = True\n\n for lyr, lyr_args in layers_config.items():\n\n lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)\n\n lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)\n\n if K.BACKEND == 'pytorch':\n\n if first_layer:\n first_layer = False\n\n if callable(lyr_config):\n 
lyr_initiated = lyr_config\n else:\n lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)\n setattr(self, lyr, lyr_initiated)\n initiated_layers[lyr] = {\"layer\": lyr_initiated, \"named_outs\": named_outs, 'call_args': call_args,\n 'inputs': lyr_inputs}\n\n else:\n # may be user has defined layers without input layer, in this case add Input layer as first layer\n if first_layer:\n if inputs is not None: # This method was called by providing it inputs.\n assert isinstance(inputs, tf.Tensor)\n # since inputs have been defined, all the layers that will be added will be next to first layer\n first_layer = False\n layer_outputs = inputs\n initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}\n\n elif lyr_name != \"Input\":\n if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer\n initialized_layer = LAYERS[\"Input\"](shape=lyr_config['input_shape'])\n else:\n # for simple dense layer based models, lookback will not be used\n def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)\n initialized_layer = LAYERS[\"Input\"](shape=def_shape)\n\n # first layer is built so next iterations will not be for first layer\n first_layer = False\n # put the first layer in memory to be used for model compilation\n # add th layer which the user had specified as first layer\n initiated_layers[initialized_layer.name] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n\n # The inputs to the layer have not been specified, so either it is an Input layer\n if lyr_inputs is None:\n # or it uses the previous outputs as inputs\n if lyr_name == \"Input\":\n # it is an Input layer, hence should not be called\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n else:\n # it is executable and uses previous outputs as inputs\n if lyr_name in ACTIVATION_LAYERS:\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n # lyr_config is serialized lambda layer, which needs to be deserialized\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n # layers_config['lambda']['config'] still contails lambda, so we need to replace the python\n # object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n if lyr_name == \"TemporalFusionTransformer\":\n lyr_config['return_attention_components'] = True\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n 
initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n else: # The inputs to this layer have been specified so they must exist in lyr_cache.\n # it is an executable\n if lyr_name in ACTIVATION_LAYERS:\n\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n layer_initialized = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': layer_initialized,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n if activation is not None: # put the string back to dictionary to be saved in config file\n lyr_config['activation'] = activation\n\n first_layer = False\n\n self.jsonize_lyr_config(lyr_config)\n\n # inputs = [] todo, indentify input layers\n # for k,v in lyr_cache.items():\n # since the model is not build yet and we have access to only output tensors of each list, this is probably\n # # the only way to know that how many `Input` layers were encountered during the run of this method. 
Each\n # tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if k.upper() != \"TIMEDISTRIBUTED\" and hasattr(v, 'op'):\n # if hasattr(v.op, 'inputs'):\n # _ins = v.op.inputs\n # if len(_ins) == 0:\n # inputs.append(v)\n # else: # not sure if this is the proper way of checking if a layer receives an input or not!\n # if hasattr(v, '_keras_mask'):\n # inputs.append(v)\n\n setattr(self, 'initiated_layers', initiated_layers)\n setattr(self, 'input_lyrs', input_lyrs)\n\n\n # todo,\n # # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use\n # # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if len(layer_outputs.op.inputs) < 1:\n # print(\"Warning: the output is of Input tensor class type\")\n # else:\n # if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node\n # print(\"Warning: the output is of Input tensor class type\")\n\n # outs = None\n #if BACKEND == 'tensorflow':\n # outs = self.call(input_lyrs)\n # setattr(self, 'output_lyrs', outs)\n # if BACKEND == 'tensorflow':\n # ## Reinitial\n # super(Model, self).__init__(\n # inputs=input_lyrs,\n # outputs=outs)\n #MODEL.__init__(self, inputs=inputs, outputs=outs)\n\n return input_lyrs # , outs", "def __init__(self,inputSize,outputSize, *args, **kwds):\n #currently the code is only for 2 hidden layers, apart from in and out\n self._saveFile = kwds.get('saveFile')\n self._inputSize = inputSize\n self._outputSize= outputSize\n self._layer1 = keras.layers.Dense(128,activation='relu')\n self._layer2 = keras.layers.Dense(64,activation='relu') \n self._layer3 = keras.layers.Dense(128,activation='relu')\n self._piLayer = keras.layers.Dense(self._outputSize-1,activation='softmax')\n self._zLayer = keras.layers.Dense(1,activation='tanh')\n self._inputs = keras.Input(shape=(self._inputSize,)) #returns placeholder\n x = self._layer1(self._inputs)\n x = self._layer2(x)\n x = self._layer3(x)\n self._outPi = self._piLayer(x)\n self._outZ = self._zLayer(x)\n self._output = keras.layers.concatenate([self._outPi,self._outZ],axis = -1)\n self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n# self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n self._model.compile(optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.99, beta_2=0.999, epsilon=1e-10, decay=0.0001),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n self._epochSize = 256", "def _build(self):\n with tf.variable_scope (self.name + '_architecutre') as scope:\n images_square = unflatten_layer ( self.images )\n visualize_images(images_square)\n\n # Conv Layer 1\n conv1_out, params = conv_2d_layer ( input = images_square,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'enc_conv_1',\n visualize = True )\n process_params(params, name = self.name)\n e1_params = params\n pool1_out = max_pool_2d_layer ( input = conv1_out, name = 'enc_pool_1')\n # lrn1_out = local_response_normalization_layer (pool1_out, name = 'lrn_1' )\n\n # Conv Layer 2\n conv2_out, params = conv_2d_layer ( input = pool1_out,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 'enc_conv_2' )\n process_params(params, name = self.name)\n e2_params = params\n pool2_out = max_pool_2d_layer ( input = conv2_out, 
name = 'enc_pool_2')\n # lrn2_out = local_response_normalization_layer (pool2_out, name = 'lrn_2' )\n\n flattened = flatten_layer(pool2_out)\n\n # Dropout Layer 1 \n flattened_dropout = dropout_layer ( input = flattened,\n prob = self.dropout_prob,\n name = 'enc_dropout_1') \n\n # Dot Product Layer 1\n fc1_out, params = dot_product_layer ( input = flattened_dropout,\n neurons = HIDDEN_1,\n name = 'enc_dot_1')\n process_params(params, name = self.name)\n e3_params = params \n\n # Dropout Layer 2 \n fc1_out_dropout = dropout_layer ( input = fc1_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_2')\n # Dot Product Layer 2\n fc2_out, params = dot_product_layer ( input = fc1_out_dropout, \n neurons = HIDDEN_2,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n e4_params = params \n\n # Dropout Layer 3 \n fc2_out_dropout = dropout_layer ( input = fc2_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_3')\n \n # Dot Product Layer 2\n self.codeword, params = dot_product_layer ( input = fc2_out_dropout, \n neurons = CODEWORD_LENGTH,\n activation = CODE_ACTIVATION,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n process_codeword_normalization_regularizer(self.codeword, \n coeff = AUTOENCODER_CODEWORD_COEFF,\n name = self.name)\n e5_params = params \n # tf.summary.histogram('codewords', self.codeword)\n # self.hash = threshold_layer ( input = self.codeword,\n # name = 'hash')\n # process_hash_regularizer(self.codeword, coeff = AUTOENCODER_HASH_COEFF,\n # name = self.name)\n\n # Decoder ... \n decoder_1_out, params = dot_product_layer ( input = self.codeword, \n neurons = HIDDEN_2,\n params = [tf.transpose(e5_params[0]), None],\n name = 'decoder_dot_1')\n d1_params = params\n process_params([params[1]], name = self.name)\n \n dec_1_out_dropout = dropout_layer ( input = decoder_1_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_1')\n\n decoder_2_out, params = dot_product_layer ( input = dec_1_out_dropout, \n neurons = HIDDEN_1,\n params = [tf.transpose(e4_params[0]), None],\n name = 'decoder_dot_2')\n d2_params = params\n process_params([params[1]], name = self.name)\n \n # dropout 2\n dec_2_out_dropout = dropout_layer ( input = decoder_2_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_2')\n\n decoder_3_out, params = dot_product_layer ( input = dec_2_out_dropout, \n neurons = 1250,\n params = [tf.transpose(e3_params[0]), None],\n name = 'decoder_dot_3')\n d3_params = params\n process_params([params[1]], name = self.name)\n\n # DeConv Layer 1\n # The output shapes need to be changed according to architecture.\n\n dec_3_square = unflatten_layer ( decoder_3_out, channels = CONV_2_N )\n upsample_1 = upsampling_layer (dec_3_square, size = (10,10), name = 'dec_upsampling_1')\n\n deconv1_out, params = deconv_2d_layer ( input = upsample_1,\n neurons = CONV_1_N,\n filter_size = CONV_2_FILT,\n output_shape = (12,12),\n # n_outs = MINI_BATCH_SIZE,\n stride = (1,1,1,1), \n params = [e2_params[0], None], \n name = 'dec_deconv_1' )\n\n process_params([params[1]], name = self.name)\n d4_params = params\n\n # DeConv Layer 2\n upsample_2 = upsampling_layer (deconv1_out, size = (24,24), name = 'dec_upsampling_2')\n decoded_images_square, params = deconv_2d_layer ( input = upsample_2,\n neurons = 1,\n filter_size = CONV_1_FILT,\n stride = (1,1,1,1),\n output_shape = (28,28),\n # n_outs = MINI_BATCH_SIZE, \n params = [e1_params[0], None], \n activation = 'tanh', \n name = 'dec_deconv_2' )\n \n process_params([params[1]], name = self.name)\n d5_params = params \n \n 
self.decoded = flatten_layer (decoded_images_square, in_shp = [-1, 28, 28, 1])\n visualize_images(decoded_images_square, name = 'decoded')\n # This is because transpose don't initialize.\n self.params = [ [e5_params[0], d1_params[1] ],\n [e4_params[0], d2_params[1] ],\n [e3_params[0], d3_params[1] ],\n [e2_params[0], d4_params[1] ],\n [e1_params[0], d5_params[1] ] ]\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + '_decoder_error') as scope:\n reconstruction_error = rmse(self.images, self.decoded) \n tf.add_to_collection( self.name + '_objectives', reconstruction_error ) \n tf.summary.scalar('reconstruction_error', reconstruction_error)\n\n self._cook_optimizer( \n lr = AUTOENCODER_LR, \n optimizer = AUTOENCODER_OPTIMIZER,\n l1_coeff = AUTOENCODER_L1_COEFF,\n l2_coeff = AUTOENCODER_WEIGHT_DECAY_COEFF)", "def common_layers_with_encoder(self):\n return [\n self.self_attention, self.self_attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_layer_norm\n ]", "def neural_net(self, layers):\n model = nn.Sequential()\n for l in range(0, len(layers) - 1):\n model.add_module(\"layer_\"+str(l), nn.Linear(layers[l],layers[l+1], bias=True))\n if l != len(layers) - 2:\n model.add_module(\"tanh_\"+str(l), nn.Tanh())\n\n return model", "def add_layer(self, freeze = True, add = True):\n if add:\n self.num_layers += 1\n if self.conv_dim == 1:\n new_cnn = layers.Conv1D(self.n_filters,\n (self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0], self.n_filters),\n padding=\"same\",\n name='cnn_1d_{}'.format(self.num_layers-1),\n kernel_initializer = initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n elif self.conv_dim == 2:\n new_cnn = layers.Conv2D(self.n_filters,\n (self.n_kernels, self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n padding=\"same\",\n name='cnn_2d_{}'.format(self.num_layers-1),\n kernel_initializer=initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n self.list_cnn.append(new_cnn)\n\n if freeze:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = False\n else:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = True", "def _get_layers(self):\n from keras.engine.topology import InputLayer\n\n layer_names = [layer.name for layer in self._model.layers[:-1] if not isinstance(layer, InputLayer)]\n logger.info('Inferred %i hidden layers on Keras classifier.', len(layer_names))\n\n return layer_names", "def vgg_layers(layer_names):\n # Load our model. 
Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n vgg.trainable = False\n\n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model", "def __init__(self): #initializing\n super(Model, self).__init__()\n self.linear = torch.nn.Linear(3, 1) # one input/feature , one output\n # here where other NN layers are added", "def vis_layer(model, layer, channel):\n num_channels = dla_lucid.LAYERS[layer][1]\n all_vis = []\n for i in range(num_channels):\n if channel is True:\n vis = vis_channel(model, layer, i)\n else:\n vis = vis_neuron(model, layer, i)\n all_vis.append(vis)\n\n all_vis_array = np.array(all_vis)\n return all_vis_array", "def setup_to_transfer_learn(model):\n for layer in model.layers:\n layer.trainable = False\n\n #model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])", "def _init_layers(self) -> None:\n super()._init_layers()\n self.controller = nn.Conv2d(\n self.feat_channels, self.num_params, 3, padding=1)", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. 
for _ in range(len(self.prior_generator.strides))\n ]", "def _init_layers(self) -> None:\n weight_nums, bias_nums = [], []\n for i in range(self.num_layers):\n if i == 0:\n weight_nums.append((self.in_channels + 2) * self.feat_channels)\n bias_nums.append(self.feat_channels)\n elif i == self.num_layers - 1:\n weight_nums.append(self.feat_channels * 1)\n bias_nums.append(1)\n else:\n weight_nums.append(self.feat_channels * self.feat_channels)\n bias_nums.append(self.feat_channels)\n\n self.weight_nums = weight_nums\n self.bias_nums = bias_nums\n self.num_params = sum(weight_nums) + sum(bias_nums)", "def LayerAddflatten(bottom_model, num_classes):\n top_model = bottom_model.output\n top_model = Flatten(name = \"flatten\")(top_model)\n top_model = Dense(526, activation = \"relu\")(top_model)\n top_model = Dense(263, activation = \"relu\")(top_model)\n top_model = Dense(num_classes, activation = \"sigmoid\")(top_model)\n return top_model", "def _export_model(self):\n graph = ComputeGraph.from_onnx(self.onnx_model.graph)\n\n print(\"Running constant propagation\")\n constant_states = constant_propagation(graph)\n\n self._remove_constants(graph, constant_states)\n self._remove_nops(graph, constant_states)\n\n # Add shape information from constant propagation:\n for var, res in constant_states.items():\n if var in graph.shape_dict:\n shape = graph.shape_dict[var]\n if res.shape != shape:\n print(\"Warning: Shapes do not match: \", var, res.shape, shape)\n if res.shape is not None:\n print(\"Replacing shape {} with {}\".format(shape, res.shape))\n graph.shape_dict[var] = res.shape\n elif res.shape is not None:\n graph.shape_dict[var] = res.shape\n\n print(\"Inference graph:\")\n for node in graph.nodes:\n inputs = node.inputs\n input_shapes = (str(graph.shape_dict[i]) for i in node.inputs if i in graph.shape_dict)\n outputs = node.outputs\n output_shapes = (str(graph.shape_dict[o]) for o in node.outputs if o in graph.shape_dict)\n print(\"{:<24} {:<20} {:<30} {:<30} {:<20} {:<30}\".format(node.name,\n node.op_type,\n \",\".join(inputs),\n \",\".join(input_shapes),\n \",\".join(outputs),\n \",\".join(output_shapes)))\n\n memory_manager = MemoryManager()\n\n self._generate_weights_file(graph)\n\n self.dummy_input = generate_dummy_main(graph)\n\n self.reference_input = generate_reference_main(graph)\n\n self._generate_network_initialization(graph, memory_manager)\n\n self._generate_network_cleanup(graph, memory_manager)\n\n implementations = self._select_implementations(graph, memory_manager)\n schedule = self._get_schedule(graph, implementations)\n # self._print_live_ranges(schedule)\n\n input_names = [\"input_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.inputs]\n output_names = [\"output_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.outputs]\n\n \"\"\"Currently we only allow single input (no batch processing) to the CNN, but this may be multi-channel input\"\"\"\n inputs = graph.inputs\n if len(inputs) > 1:\n print(\"ERROR: Multiple inputs not supported!\")\n exit(1)\n else:\n input_shape = graph.shape_dict[inputs[0].name]\n print(\"Input shape: {}\".format(input_shape))\n\n if len(input_shape) == 4:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 3:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not 
supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 2:\n print(\"Input is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n outputs = graph.outputs\n if len(outputs) > 1:\n print(\"ERROR: Multiple outputs not supported\")\n exit(1)\n else:\n output_shape = graph.shape_dict[outputs[0].name]\n print(\"Output shape: {}\".format(output_shape))\n\n if len(output_shape) == 2:\n print(\"Output is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n output_defs = [\"pico_cnn::naive::Tensor *\" + n for n in output_names]\n elif len(output_shape) == 3:\n print(\"ERROR: Unknown output shape of network: {}\".format(output_shape))\n exit(1)\n elif len(output_shape) == 4:\n print(\"ERROR: Multi-dimensional output is currently not supported.\")\n exit(1)\n\n network_def = \"void Network::run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n network_def_header = \"void run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n\n layer_declaration_code = \"\"\n layer_allocation_code = \"\"\n layer_execution_code = \"\"\n layer_deletion_code = \"\"\n\n \"\"\"Iterate over all tasks in the schedule, put some debug info in the code and the pico-cnn implementation.\"\"\"\n for task in schedule:\n num, node, impl = task\n layer_allocation_code += \" //Layer \" + str(num) + \" \" + node.name + \" \" + node.op_type + \"\\n\"\n layer_allocation_code += \" //Attributes\\n\"\n for key, val in node.attrs.items():\n layer_allocation_code += \" // \" + str(key) + \": \" + str(val) + \"\\n\"\n layer_allocation_code += \" //Parameters\\n\"\n layer_allocation_code += \" //Inputs: \" + \",\".join(node.inputs) + \"\\n\"\n layer_allocation_code += \" //Outputs: \" + \",\".join(node.outputs) + \"\\n\"\n layer_allocation_code += \" //Shape:\\n\"\n for i in node.inputs:\n layer_allocation_code += \" // {}: {}\\n\".format(i, graph.get_shape(i))\n for o in node.outputs:\n layer_allocation_code += \" // {}: {}\\n\".format(o, graph.get_shape(o))\n\n if impl:\n layer_declaration_code += impl.generate_declaration()\n layer_declaration_code += \"\\n\"\n\n layer_allocation_code += impl.generate_allocation()\n layer_allocation_code += \"\\n\"\n\n layer_execution_code += impl.generate_execution()\n layer_execution_code += \"\\n\"\n\n layer_deletion_code += impl.generate_deletion()\n layer_deletion_code += \"\\n\"\n\n else:\n print(\"ERROR: Unsupported layer: {}! 
Aborting code generation.\".format(node.op_type))\n return 1\n\n self.constructor_code += layer_allocation_code + \"\\n\"\n self.destructor_code += layer_deletion_code + \"\\n\"\n\n # # TODO: What does this loop do?\n # for id, buffer in memory_manager.buffers.items():\n # if graph.is_tensor(id):\n # continue\n # if graph.is_input(id):\n # continue\n # if graph.is_output(id):\n # continue\n\n network_code: Text = \"#include \\\"network.h\\\"\\n\\n\"\n network_code += \"Network::Network() {\\n\\n\"\n network_code += self.constructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += \"Network::~Network() {\\n\"\n network_code += self.destructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += network_def+\"{\\n\"\n network_code += layer_execution_code\n\n network_code += \"}\\n\\n\"\n\n network_header = \"#ifndef NETWORK_H\\n\"\n network_header += \"#define NETWORK_H\\n\\n\"\n network_header += \"#include \\\"pico-cnn/pico-cnn.h\\\"\\n\\n\"\n network_header += \"class Network {\\n\"\n network_header += \"public:\\n\"\n network_header += \"Network();\\n\"\n network_header += \"~Network();\\n\"\n network_header += network_def_header + \"; \\n\\n\"\n network_header += self.buffer_declaration + \"\\n\"\n network_header += layer_declaration_code\n network_header += \"};\\n\"\n network_header += \"#endif //NETWORK_H\\n\"\n\n self.network_code = network_code\n self.network_header = network_header\n\n \"\"\"\n Create Makefile containing a target for the generated dummy input and a network specific one.\n The code for the network specific input has to be written manually.\n \"\"\"\n # TODO: Does this need to be more sophisticated?\n self.makefile = \"CC = g++\\n\"\n self.makefile += \"CFLAGS = -std=c++11 -Wall -O2 -march=native -DINFO\\n\"\n self.makefile += \"LDFLAGS = -L../../../pico-cnn\\n\"\n self.makefile += \"LD_LIBS = -lpico-cnn -lm\\n\\n\"\n self.makefile += \"# list of all generated .cpp files.\\n\"\n self.makefile += \"NETWORK_LIST = network.cpp\"\n self.makefile += \"\\n\\ndummy_input: dummy_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) dummy_input.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) $(LDFLAGS) $(LD_LIBS) -o dummy_input\"\n self.makefile += \"\\n\\nreference_input: reference_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) reference_input.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o reference_input\"\n self.makefile += \"\\n\\n{}: {}.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\".format(self.model_name, self.model_name)\n self.makefile += \"$(CC) {}.cpp $(NETWORK_LIST) -I../../.. 
$(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o {}\".format(self.model_name, self.model_name)\n self.makefile += \"\\n\\nall: dummy_input reference_input {}\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: clean\\n\"\n self.makefile += \"clean:\\n\\trm -rf {} dummy_input reference_input\\n\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: libpico-cnn.a\\n\"\n self.makefile += \"libpico-cnn.a:\\n\\t$(MAKE) -C ../../../pico-cnn\"\n\n self.save(\"./generated_code/{}\".format(self.model_name))", "def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)", "def __call__(self, inputs, **kwargs):\n # Actually call the layer (optionally building it).\n output = super(Layer, self).__call__(inputs, **kwargs)\n if context.in_eager_mode():\n return output\n\n # Un-built subclassed network: build it\n if isinstance(self, Network) and not self.inputs:\n self._set_inputs(inputs, training=kwargs.get('training'))\n\n # Update learning phase info.\n output_tensors = to_list(output)\n uses_lp = any(\n [getattr(x, '_uses_learning_phase', False) for x in to_list(inputs)])\n uses_lp = getattr(self, 'uses_learning_phase', False) or uses_lp\n for i in range(len(output_tensors)):\n output_tensors[i]._uses_learning_phase = getattr(\n output_tensors[i], '_uses_learning_phase', False) or uses_lp\n\n # Optionally load weight values that were specified at layer instantiation.\n if hasattr(self, '_initial_weights') and self._initial_weights is not None:\n self.set_weights(self._initial_weights)\n del self._initial_weights\n return output", "def export_layers(self, dest, show):\n doc = copy.deepcopy(self.document)\n for layer in doc.xpath('//svg:g[@inkscape:groupmode=\"layer\"]', namespaces=inkex.NSS):\n layer.attrib['style'] = 'display:none'\n id = layer.attrib[\"id\"]\n if id in show:\n layer.attrib['style'] = 'display:inline'\n\n doc.write(dest)", "def common_layers(self):\n return [\n self.attention_layer, self.attention_output_dense,\n self.attention_dropout, self.attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_dropout,\n self.output_layer_norm\n ]", "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)", "def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n CustomTransformerBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n float_type=self.float_type,\n name=(\"layer_%d\" % i)))\n super(CustomTransformer, self).build(unused_input_shapes)", "def create_feature_layers(self):\n feature_columns = [tf.feature_column.numeric_column(name,\n normalizer_fn=lambda x: (x - self.train_features[\n name].mean()) /\n self.train_features[name].std())\n for name in self.feature_names]\n\n self.feature_layers = layers.DenseFeatures(feature_columns)\n return 'feature layers had been created'", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this 
layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf" ]
[ "0.72325057", "0.6624318", "0.62856334", "0.62295145", "0.6214809", "0.6171816", "0.60817003", "0.5990596", "0.59697384", "0.5955498", "0.5902771", "0.58940166", "0.5883603", "0.5861259", "0.5861006", "0.5847972", "0.5845203", "0.5828398", "0.5788822", "0.5782679", "0.5767099", "0.57425326", "0.57222074", "0.5712829", "0.5712829", "0.56764674", "0.56520236", "0.56172395", "0.5594013", "0.55928683", "0.5580619", "0.5579592", "0.55595297", "0.5550327", "0.55495214", "0.5527533", "0.5517842", "0.55077225", "0.5502721", "0.55026466", "0.549963", "0.5492102", "0.5485171", "0.54708755", "0.54690754", "0.5463765", "0.5462446", "0.5460833", "0.5456318", "0.5452711", "0.5444138", "0.5443301", "0.5439382", "0.54338735", "0.5421428", "0.5419587", "0.5408111", "0.54061496", "0.5398926", "0.5398483", "0.53930515", "0.53733885", "0.53709096", "0.53703576", "0.5361642", "0.5359667", "0.53589463", "0.5354624", "0.53545886", "0.535422", "0.53375673", "0.5334994", "0.53269535", "0.53221035", "0.5317047", "0.53027296", "0.52973855", "0.52901715", "0.5275039", "0.5274979", "0.52737033", "0.5266431", "0.5261835", "0.52605796", "0.52600664", "0.52496713", "0.52330184", "0.5231018", "0.52298784", "0.5227895", "0.522487", "0.52173674", "0.52170604", "0.5214551", "0.52106416", "0.5199109", "0.5191151", "0.518876", "0.5188223", "0.5186258", "0.5180307" ]
0.0
-1
The top level function.
def eval_tree(tree):
    global genv
    global result
    # Here, get the list of children nodes. Iterate over that list, calling eval_node on each node.
    for node in tree.body:
        val = eval_node(node, genv)
        result = val[0]
        genv = val[1]
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n\n pass", "def main_function():\n return 1", "def firstFunction(self):", "def main():\n pass", "def main_code():\n pass", "def main():\n\tpass", "def main():\n\n pass\n\n return None", "def main():\n\n pass\n\n return None", "def main(self):\n pass", "def my_function():\n\tpass", "def main(self):\r\n pass", "def main():\n Main()", "def root():\n pass", "def main():\n pass\n\n if __name__ == \"__main)__\":\n main()", "def main():\n return 0", "def main():\n return 0", "def main():\n return", "def main():\n ...", "def local():\n\n pass", "def main():\n hello()", "def make_main_function(self):\n\t\tself.main = self.tokens.get_joined()\n\t\tfor func in self.func_dict:\n\t\t\tself.main = self.main.replace(self.func_dict[func], func)", "def main(self):", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main(self) -> None:\n pass", "def __call__(fun_name):", "def fn():", "def somefunc():", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def foo():\r\n pass", "def my_fuction():\n pass", "def my_function():\n\n\treturn None", "def premain(self):\r\n return self._premain", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def __def_function__():\n pass", "def caller():\n\n for func in funcs:\n func()", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def smarter():\r\n pass", "def inner():\n print(\"2This is inner function\\n\", inner.__name__, inner.__doc__)\n func()", "def func():", "def dummy_fn(self):\n\t\tpass", "def simple():", "def simple():", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass", "def my_function():\n pass" ]
[ "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.68720514", "0.6840477", "0.683859", "0.67733955", "0.6754576", "0.6752038", "0.6747416", "0.66929114", "0.66929114", "0.6500867", "0.6488615", "0.6465591", "0.64609444", "0.6454063", "0.6431377", "0.6327961", "0.6327961", "0.63004494", "0.628516", "0.62517905", "0.6190485", "0.61717844", "0.6165816", "0.6161707", "0.6161707", "0.6161707", "0.6161707", "0.6142732", "0.614138", "0.6138642", "0.6114361", "0.60954136", "0.60642093", "0.60548645", "0.60462", "0.6040708", "0.60274225", "0.60274225", "0.60274225", "0.60204595", "0.6019372", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60071373", "0.60048074", "0.59916276", "0.5984874", "0.5984434", "0.59680045", "0.59680045", "0.5932019", "0.5932019", "0.5932019", "0.5932019", "0.5932019", "0.5932019", "0.5932019", "0.5932019" ]
0.0
-1
Evaluates a Node object in the abstract syntax tree.
def eval_node(node, env):
    global genv
    global result
    node_type = node_name(node)
    if node_type == 'Expr':
        return eval_node(node.value, env)
    elif node_type == 'Assign':
        val = eval_node(node.value, env)
        while type(val) is tuple and len(val) == 2 and (type(val[1]) == GlobalEnv or type(val[1]) == LocalEnv):
            val = val[0]
        # extract the variable name, evaluate the RHS, then extend the environment.
        return 0, env.extend([node.targets[0].id], [val])
    elif node_type == 'BinOp':
        # get the left and right operands (we use only single operands) and the operator.
        # evaluate the operands and apply the operator. return the number, env.
        left = eval_node(node.left, env)[0]
        right = eval_node(node.right, env)[0]
        left = left[0] if type(left) is tuple else left
        right = right[0] if type(right) is tuple else right
        op = node_name(node.op)
        if op == "Add":
            return (left + right), env
        elif op == "Sub":
            return (left - right), env
        elif op == "Mult":
            return (left * right), env
        elif op == "Div":
            return (left / right), env
        elif op == "Mod":
            return (left % right), env
        return 0, env
    elif node_type == 'FunctionDef':
        # need the function id (name), args, and body. Extend the environment.
        # you can leave the args wrapped in the ast class and the body and unpack them
        # when the function is called.
        return 0, env.extend([node.name], [(node.args, node.body)])
    elif node_type == 'Call':
        # get any values passed in to the function from the Call object.
        # get the fxn name and look up its parameters, if any, and body from the env.
        # get lists for parameter names and values and extend a LocalEnv with those bindings.
        # evaluate the body in the local env, return the value, env.
        func = eval_node(node.func, env)[0]
        local_env = LocalEnv(None, env)
        args = func[0].args
        body = func[1]
        index = 0
        for val in node.args:
            local_env = local_env.extend([args[index].arg], [eval_node(val, local_env)[0]])
            index += 1
        for node in body:
            val = eval_node(node, local_env)
            if node_name(node) == "Return":
                output_val = val[0]
                local_env = val[1]
                return output_val, env
    elif node_type == 'Return':
        # evaluate the node, return the value, env.
        return eval_node(node.value, env)
    elif node_type == 'Name':
        # Name(identifier id)- lookup the value binding in the env
        # return the value, env
        return env.lookup(node.id), env
    # Num(object n) -- a number, return the number, env.
    elif node_type == 'Num':
        return node.n, env
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self, tree):\n\t\tpass", "def eval(self, node):\n\n return None", "def eval(self):\n return self._eval_node(self.syntax_tree)", "def eval(node):\n if node.id == '(literal)':\n return node.value\n elif node.id == '(name)':\n return scope[node.value]\n elif node.id == '(':\n name, args = node.children\n name = eval(name)\n args = map(eval, args)\n return name(*args)\n elif node.id == 'and':\n assert len(node.children) == 2\n first = eval(node.children[0])\n if first:\n return eval(node.children[1])\n else:\n return first\n elif node.id == 'or':\n assert len(node.children) == 2\n first = eval(node.children[0])\n if first:\n return first\n else:\n return eval(node.children[1])\n elif node.id == 'not':\n assert len(node.children) == 1\n return not eval(node.children[0])\n elif node.id in prefix_operators and len(node.children) == 1:\n value = eval(node.children[0])\n return prefix_operators[node.id](value)\n elif node.id in operators:\n values = [eval(v) for v in node.children]\n return operators[node.id](*values)\n else:\n raise ValueError('unknown node type', node)", "def eval(self) -> typing.Any:\n return self.expr()", "def xpathNodeEval(self, str, ctx):\n if ctx is None: ctx__o = None\n else: ctx__o = ctx._o\n ret = libxml2mod.xmlXPathNodeEval(self._o, str, ctx__o)\n if ret is None:raise xpathError('xmlXPathNodeEval() failed')\n return xpathObjectRet(ret)", "def _evaluate(self, tree: nltk.tree.Tree):\n if tree.label() == \"Root\":\n if len(tree) == 1:\n func = self._evaluate(tree[0])\n func()\n else:\n func = self._evaluate(tree[0])\n result = self._evaluate(tree[1])\n func(result)\n return\n elif tree.label() == \"Result\":\n if tree[0].label() == \"Entity\":\n return self._evaluate(tree[0])\n if tree[0].label() == \"Unary_Command\":\n func = self._evaluate(tree[0])\n result = self._evaluate(tree[1])\n return func(result)\n if tree[1].label() == \"Binary_Command\":\n result_left = self._evaluate(tree[0])\n func = self._evaluate(tree[1])\n result_right = self._evaluate(tree[2])\n return func(result_left, result_right)\n elif tree.label() == \"Unary_Command\":\n func = self.unary_commands.get(tree[0])[1]\n return func\n elif tree.label() == \"Terminal_Command\":\n func = self.terminal_commands.get(tree[0])[1]\n return func\n elif tree.label() == \"Binary_Command\":\n func = self.binary_commands.get(tree[0])[1]\n return func\n elif tree.label() == \"Entity\":\n return [tree[0]]\n\n print(\"Error: CFG label rule not defined in \"\n \"evaluateEngine#self._evaluate\",\n file=sys.stderr)", "def evaluate(self):\n return self._evaluate_recur(self.root())", "def evaluate(self):\n return self._evaluate_recur(self.root())", "def execute(self, root):\n assert isinstance(root, Node)\n\n null = Null()\n\n def optional(expression):\n \"\"\"return True iff expression is optional\"\"\"\n return any(e.data == 'optional' for e in expression.children)\n\n def concatenate(expression, stream):\n \"\"\"evaluate query expressions and concatenate results\"\"\"\n # fork the stream for each subexpression\n streams = itertools.tee(stream, len(expression.children))\n return itertools.chain.from_iterable(\n evaluate(expression, stream)\n for expression, stream in zip(expression.children, streams)\n )\n\n def iterate(expression, stream):\n \"\"\"iterate over json stream\"\"\"\n for node in stream:\n itr = (\n iter(node)\n if isinstance(node, List) else\n iter(node.values())\n if isinstance(node, Object) else\n iter([])\n if optional(expression) else\n None\n )\n if not itr:\n raise TypeError(\n 'cannot 
iterate over {}'.format(\n node.__class__.__name__\n )\n )\n for child in itr:\n yield child\n\n def indexer(expression, stream):\n \"\"\"extract elements from json containers\"\"\"\n def throw(node, item):\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__,\n item.__class__.__name__,\n )\n )\n\n def mkint(expression):\n if expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n idx = float(expression.children[0])\n if not idx.is_integer():\n idx = int(idx) + 1\n return idx\n else:\n assert False, 'bad number expression {}'.format(\n expression\n )\n\n def mkslice(expression):\n s, e = None, None\n for idx in expression.children:\n if idx.data == 'start':\n s = mkint(idx.children[0])\n elif idx.data == 'end':\n e = mkint(idx.children[0])\n yield slice(s, e)\n\n def mkindex(expression):\n if expression.data == 'expression':\n return evaluate(expression, stream)\n elif expression.data == 'slice':\n return mkslice(expression)\n elif expression.data == 'cname':\n return expression.children\n elif expression.data == 'string':\n return [expression.children[0][1:-1]]\n elif expression.data in ('integer', 'float'):\n return [mkint(expression)]\n else:\n assert False, 'bad index expression {}'.format(expression)\n\n for item in mkindex(expression.children[0]):\n for node in stream:\n if isinstance(node, Object):\n if isinstance(item, Primitive):\n item = str(item)[1:-1]\n if isinstance(item, basestring):\n yield node.get(item, null)\n continue\n\n if isinstance(node, List):\n if isinstance(item, Primitive):\n item = int(str(item))\n if isinstance(item, (int, slice)):\n try:\n yield node[item]\n except IndexError:\n yield null\n continue\n\n if not optional(expression):\n throw(node, item)\n\n def properties(expression, stream):\n \"\"\"extract values from json objects\"\"\"\n def index(expression, stream):\n item = expression.children[0].children[0]\n for node in stream:\n if isinstance(node, Object):\n yield node.get(item, null)\n elif not optional(expression):\n itype = expression.children[0].data\n if itype == 'cname':\n itype = 'string'\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__, itype\n )\n )\n\n for expression in expression.children:\n stream = index(expression, stream)\n\n for node in stream:\n yield node\n\n def primitive(expression):\n \"\"\"return a primitive type\"\"\"\n expression = expression.children[0]\n if expression.data == 'null':\n return null\n elif expression.data == 'boolean':\n return expression.children[0] == 'true'\n elif expression.data == 'string':\n return expression.children[0][1:-1]\n elif expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n return float(expression.children[0])\n assert False, 'bad primitive {}'.format(expression)\n\n def evaluate(expression, stream):\n \"\"\"evaluate query expression over json stream\"\"\"\n assert expression.data == 'expression', expression\n assert len(expression.children) == 1\n\n expression = expression.children[0]\n\n if expression.data == 'identity':\n for node in stream:\n yield node\n\n elif expression.data == 'primitive':\n yield primitive(expression)\n\n elif expression.data == 'properties':\n for node in properties(expression, stream):\n yield node\n\n elif expression.data == 'indexer':\n for node in indexer(expression, stream):\n yield node\n\n elif expression.data == 'iterator':\n for node in iterate(expression, stream):\n yield node\n\n elif expression.data 
== 'concatenator':\n for node in concatenate(expression, stream):\n yield node\n\n else:\n assert False, 'bad expression {}'.format(expression)\n\n stream, pipeline = [root], self.tree.children[0]\n for expression in pipeline.children:\n stream = evaluate(expression, stream)\n\n for result in stream:\n yield result", "def safe_eval(query, node, *args, **kw):\n #TODO(mvv): assert node type is an ast type\n kw.update({'node': node})\n return eval(query, {'ast': ast}, kw)", "def eval_tree(self, root, left=None, right=None,\r\n cards=(\"3H\", \"5D\", \"AS\")):\r\n return Tree(root, left, right).evaluate(cards)", "def eval(self):\n raise NotImplementedError", "def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()", "def xpath_eval(node,expr,namespaces=None):\r\n ctxt = common_doc.xpathNewContext() #@UndefinedVariable\r\n ctxt.setContextNode(node)\r\n ctxt.xpathRegisterNs(\"ns\",COMMON_NS)\r\n if namespaces:\r\n for prefix,uri in namespaces.items():\r\n ctxt.xpathRegisterNs(unicode(prefix),uri)\r\n ret=ctxt.xpathEval(unicode(expr))\r\n ctxt.xpathFreeContext()\r\n return ret", "def evaluate(compiled_expression):", "def evaluateASTNode(*args):\n return _libsbml.SBMLTransforms_evaluateASTNode(*args)", "def _process_expr(self, node: ast.Expr) -> None:\n if isinstance(node.value, ast.Call):\n self._process_call(node.value)\n elif isinstance(node.value, ast.Constant):\n self._process_constant(node.value)\n else:\n self.visit(node)", "def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')", "def expr_eval(node, table=Memory()):\n\n if node.kind == \"BIN_OP\":\n # Evaluate both sides, then return (left <op> right).\n left = expr_eval(node.left, table)\n right = expr_eval(node.right, table)\n return bin_ops[node.op](left, right)\n\n elif node.kind == \"NEGATE\":\n return - expr_eval(node.expr, table)\n\n elif node.kind == \"NUM\":\n return node.number\n\n elif node.kind == \"VAR_REF\":\n return table[node.name]\n\n elif node.kind == \"ARRAY_REF\":\n # TODO: This code belongs in the Array datatype.\n\n # Fetch the array.\n array = table[node.name]\n # Compute the index (a Num object).\n index = expr_eval(node.expr, table)\n\n return array[index]\n\n elif node.kind == \"FUNCTION_CALL\":\n # The object representing the function we are calling.\n function = shared.program.functions[node.name]\n\n output = function.evaluate(\n node.backwards,\n node.ref_args,\n [expr_eval(arg, table) for arg in node.ref_args],\n [expr_eval(arg, table) for arg in node.const_args]\n )\n\n # After evaluating the function, the output table will\n # contain changed variables and a final result.\n table.update_refs(output)\n\n # TODO: fix hack\n if \"result\" in output:\n return output[\"result\"]\n else:\n import sys\n print(\"No result found in table.\")\n sys.exit(0)\n\n elif node.kind == \"ARRAY_EXPR\":\n # Evaluate the expressions in order and create a list.\n return datatypes.List(\n [expr_eval(entry, table) for entry in node.entries])", "def eval(self):\n raise NotImplemented()", "def evaluate(node,operators):\n\tif isinstance(node, ast.Num):\n\t\treturn node.n\n\telif isinstance(node, ast.BinOp):\n\t\treturn operators[type(node.op)](evaluate(node.left,operators), evaluate(node.right,operators))\n\telif isinstance(node, ast.UnaryOp):\n\t\treturn operators[type(node.op)](evaluate(node.operand,operators))\n\telse:\n\t\traise TypeError(node)", "def _eval(self, node, ctx):\n if node is None:\n return None\n elif isinstance(node, ast.Name): # 
<identifier>\n # lookup identifiers in local namespace\n if node.id in ctx['locals']:\n _local = ctx['locals'][node.id]\n\n # if local variable contains a list, evaluate each element by threading 'get_expr' over it\n if isinstance(_local, list):\n _retlist = []\n for _local_el in _local:\n # non-string elements are simply passed through\n if not isinstance(_local_el, str):\n _retlist.append(_local_el)\n continue\n\n # string-valued elements are evaluated\n try:\n # NOTE: local variable lookup is disabled when threading\n # over lists that were stored in local variables themselves.\n # This is done to prevent infinite recursion errors for\n # expressions which may reference themselves\n _ret_el = self.get_expr(_local_el, locals=None)\n except NameError as e:\n # one element of the list references a local variable\n # -> stop evaluation and return dummy\n # use NameError object instead of None to identifiy\n # dummy elements unambiguously later\n _retlist.append(e)\n else:\n # evaluation succeeded\n _retlist.append(_ret_el)\n return _retlist\n # local variables containing strings are parsed\n elif isinstance(_local, str):\n return self.get_expr(_local, locals=None)\n # all other types are simply passed through\n else:\n return _local\n\n # if no local is found, try a few builtin Python literals\n elif node.id in ('True', 'False', 'None'): # restrict subset of supported literals\n return ast.literal_eval(node.id) # returns corresponding Python literal from string\n\n # if nothing above matched, assume mistyped identifier and give up\n # NOTE: do *not* assume identifier is a ROOT file path. ROOT file paths\n # must be given explicitly as strings.\n else:\n raise NameError(\"Cannot resolve identifier '{}': not a valid Python literal or a registered local variable!\".format(node.id))\n elif isinstance(node, ast.Str): # <string> : array column\n if ctx['input']:\n # lookup in ROOT file\n return self.get(node.s)\n else:\n # return string as-is\n return node.s\n elif isinstance(node, ast.Num): # <number>\n return node.n\n elif isinstance(node, ast.Call): # node names containing parentheses (interpreted as 'Call' objects)\n # -- determine function to call\n\n # function handle is a simple identifier\n if isinstance(node.func, ast.Name):\n\n # handle special functions\n if node.func.id in self.special_functions:\n _spec_func_spec = self.special_functions[node.func.id]\n # callable for special function (default to no-op)\n _callable = _spec_func_spec.get('func', lambda x: x)\n # modify avaluation context for special function\n ctx = dict(ctx, **_spec_func_spec.get('ctx', {}))\n\n # call a registered input function\n else:\n try:\n _callable = ctx['functions'][node.func.id]\n except KeyError as e:\n raise KeyError(\n \"Cannot call input function '{}': no such \"\n \"function!\".format(node.func.id))\n\n # function handle is an expression\n else:\n # evaluate 'func' as any other node\n _callable = self._eval(node.func, ctx)\n\n # evaluate unpacked positional arguments, if any\n _starargs_values = []\n if node.starargs is not None:\n _starargs_values = self._eval(node.starargs, ctx)\n\n # starred kwargs (**) not supported for the moment\n if node.kwargs:\n raise NotImplementedError(\n \"Unpacking keyword arguments in expressions via \"\n \"** is not supported. 
Expression was: '{}'\".format(\n ast.dump(node, annotate_fields=False)))\n\n # evaluate arguments\n _args = map(lambda _arg: self._eval(_arg, ctx), node.args) + _starargs_values\n _kwargs = {\n _keyword.arg : self._eval(_keyword.value, ctx)\n for _keyword in node.keywords\n }\n\n # call function\n return _callable(*_args, **_kwargs)\n elif isinstance(node, ast.BinOp): # <left> <operator> <right>\n return ctx['operators'][type(node.op)](self._eval(node.left, ctx), self._eval(node.right, ctx))\n elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1\n return ctx['operators'][type(node.op)](self._eval(node.operand, ctx))\n elif isinstance(node, ast.Subscript): # <operator> <operand> e.g., -1\n if isinstance(node.slice, ast.Index): # support subscripting via simple index\n return self._eval(node.value, ctx)[self._eval(node.slice.value, ctx)]\n elif isinstance(node.slice, ast.Slice): # support subscripting via slice\n return self._eval(node.value, ctx)[self._eval(node.slice.lower, ctx):self._eval(node.slice.upper, ctx):self._eval(node.slice.step, ctx)]\n else:\n raise TypeError(node)\n elif isinstance(node, ast.Attribute): # <value>.<attr>\n return getattr(self._eval(node.value, ctx), node.attr)\n elif isinstance(node, ast.List): # list of node names\n return [self._eval(_el, ctx) for _el in node.elts]\n elif isinstance(node, ast.Tuple): # tuple of node names\n return tuple(self._eval(_el, ctx) for _el in node.elts)\n else:\n raise TypeError(node)", "def evaluateExpression(expr):\n\toperators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,\n\t\t\t\t ast.Div: op.truediv, ast.USub: op.neg, ast.Pow: myPow}\n\tnode = ast.parse(expr.strip(), mode='eval')\n\treturn evaluate(node.body,operators)", "def recursive_eval(sexpr):\n newexpr = rewrite_node(sexpr)\n newexpr.apply(recursive_eval)\n return newexpr", "def visit_expr_stmt(self: Parser, node: doc.Expr) -> None:\n\n res = self.eval_expr(node.value)\n if res is None:\n pass\n elif isinstance(res, Frame):\n res.add_callback(partial(res.__exit__, None, None, None))\n res.__enter__()\n elif isinstance(res, PrimExpr):\n T.evaluate(res)\n elif isinstance(res, (int, bool)):\n T.evaluate(tvm.tir.const(res))\n elif isinstance(res, tvm.relay.Call) and not res.args:\n # Using GlobalVar.__call__ with no arguments is ambiguous, as\n # each IR has a different function Call representation. If\n # this occurs, convert to the TIR representation.\n T.evaluate(tvm.tir.call_tir(res.op))\n elif isinstance(res, str):\n # Ignore docstrings\n pass\n else:\n self.report_error(node, f\"Parsing resulted in unexpected type {type(res)}\")", "def evaluate(self):\n raise NotImplementedError()", "def parse(self, mode=None):\n if mode == \"exec\":\n return self.ast_node\n elif mode == \"eval\":\n if self.expression_ast_node:\n return self.expression_ast_node\n else:\n raise SyntaxError\n elif mode == None:\n if self.expression_ast_node:\n return self.expression_ast_node\n else:\n return self.ast_node\n elif mode == \"exec\":\n raise NotImplementedError\n else:\n raise ValueError(\"parse(): invalid mode=%r\" % (mode,))", "def evaluate(self):\n raise NotImplementedError(\"Abstract method\")", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def eval_tree(tree):\n global genv\n global result\n # Here, get the list of children nodes. 
Iterate over that list, calling eval_node on each node.\n for node in tree.body:\n val = eval_node(node, genv)\n result = val[0]\n genv = val[1]\n return result", "def evaluate(self):\n eval_list = nx.topological_sort(self.graph)\n for n in eval_list:\n n.evaluate()\n print(\"evaluating type\", type(n))\n\n # Notify observers of finished calculation\n self.notifyObservers(\"EVALUATION DONE\")\n return \"FINISHED\"", "def test_RestrictingNodeTransformer__visit_Eq__1():\n assert restricted_eval('1 == int(\"1\")') is True", "def eval(self):\n pass", "def eval(self):\n pass", "def eval(self):\n pass", "def evaluate(self) -> celpy.celtypes.Value:\n value = self.visit(self.ast)\n if isinstance(value, CELEvalError):\n raise value\n return cast(celpy.celtypes.Value, value)", "def SBMLTransforms_evaluateASTNode(*args):\n return _libsbml.SBMLTransforms_evaluateASTNode(*args)", "def evaluate(self, expression):\n assert self.cluster\n assert self.node_id\n context = self._yaql_context.create_child_context()\n context['$%new'] = self._get_info(self.EXPECTED)[self._node_id]\n context['$%old'] = self._get_info(self.CURRENT).get(self._node_id, {})\n\n parsed_exp = self._yaql_engine(expression)\n return parsed_exp.evaluate(data=context['$%new'], context=context)", "def eval(expr):\n global simulator\n\n if simulator is None:\n print \"program is not running\"\n return\n return simulator.eval (expr)", "def run(self):\r\n\r\n raise NotImplementedError(\"Subclasses of Node should implement the run() method\")", "def load(cls, expr: str, tree_type, parent=None):\n expr = ast.parse(expr, mode='eval')\n return cls.recursive_load(expr, tree_type, parent)", "def eval(self):\n if self._leftchild == None and self._rightchild == None:\n return self._element\n elif self._leftchild == None:\n if self._element == \"-\":\n return -1 * self._rightchild.eval()\n elif self._element == \"sqrt\":\n return math.sqrt(self._rightchild.eval())\n elif self._element == \"sq\":\n value = self._rightchild.eval()\n return value * value\n elif self._rightchild == None:\n print(\"ERROR: no left child for \" + str(self))\n exit(-1)\n else:\n if self._element == \"+\":\n return self._leftchild.eval() + self._rightchild.eval()\n if self._element == \"-\":\n return self._leftchild.eval() - self._rightchild.eval()\n if self._element == \"*\":\n return self._leftchild.eval() * self._rightchild.eval()\n if self._element == \"/\":\n return self._leftchild.eval() / self._rightchild.eval()", "def literal_eval(node_or_string):\r\n _safe_names = {'None': None, 'True': True, 'False': False}\r\n if isinstance(node_or_string, basestring):\r\n node_or_string = parse(node_or_string, mode='eval')\r\n if isinstance(node_or_string, Expression):\r\n node_or_string = node_or_string.body\r\n def _convert(node):\r\n if isinstance(node, Str):\r\n return node.s\r\n elif isinstance(node, Num):\r\n return node.n\r\n elif isinstance(node, Tuple):\r\n return tuple(map(_convert, node.elts))\r\n elif isinstance(node, List):\r\n return list(map(_convert, node.elts))\r\n elif isinstance(node, Dict):\r\n return dict((_convert(k), _convert(v)) for k, v\r\n in zip(node.keys, node.values))\r\n elif isinstance(node, Name):\r\n if node.id in _safe_names:\r\n return _safe_names[node.id]\r\n raise ValueError('malformed string')\r\n return _convert(node_or_string)", "def sub_evaluator(self, ast: lark.Tree) -> 'Evaluator':\n return Evaluator(ast, activation=self.activation, functions=self.functions)", "def xpathEvalExpr(self):\n libxml2mod.xmlXPathEvalExpr(self._o)", "def 
eval(self, *args, **kwargs):\n raise NotImplementedError", "def evaluate(self):\n raise Exception(\"Not implemented.\")", "def xpath_eval(self,expr,namespaces=None):\n ctxt = common_doc.xpathNewContext()\n ctxt.setContextNode(self.xmlnode)\n ctxt.xpathRegisterNs(\"ns\",to_utf8(self.ns))\n if namespaces:\n for prefix,uri in namespaces.items():\n ctxt.xpathRegisterNs(prefix,uri)\n ret=ctxt.xpathEval(expr)\n ctxt.xpathFreeContext()\n return ret", "def evaluate_expression_tree(root:Node) -> float:\n if root is None:\n return 0\n if root._left is None and root._right is None:\n return float(root._data)\n left_sum = evaluate_expression_tree(root._left)\n right_sum = evaluate_expression_tree(root._right)\n if root._data == '+':\n return left_sum + right_sum\n elif root._data == '-':\n return left_sum - right_sum\n elif root._data == '*':\n return left_sum * right_sum\n elif root._data == '/':\n return left_sum / right_sum\n elif root._data == '^':\n return left_sum ** right_sum\n else:\n raise ArithmeticError(root._data)", "def expression_ast_node(self):\n node = self.ast_node\n if len(node.body) == 1 and isinstance(node.body[0], ast.Expr):\n return ast.Expression(node.body[0].value)\n else:\n return None", "def visit_expr(self, *args):\n return _ida_hexrays.ctree_visitor_t_visit_expr(self, *args)", "def test_generated_parser(self):\r\n char_stream = antlr3.ANTLRStringStream('4 + 5\\n')\r\n lexer = ExprLexer(char_stream)\r\n tokens = antlr3.CommonTokenStream(lexer)\r\n parser = ExprParser(tokens)\r\n r = parser.prog()\r\n\r\n # this is the root of the AST\r\n root = r.tree\r\n\r\n nodes = antlr3.tree.CommonTreeNodeStream(root)\r\n nodes.setTokenStream(tokens)\r\n eval = Eval(nodes)\r\n eval.prog()", "def interpret(self):\n tree = self.parser.parse()\n if tree is None:\n return ''\n self.visit(tree)", "def evaluate(self) :\n pass", "def execute(self):\n if self.backend is not None:\n self._process_node(self.ast)\n return self.backend.get_output()\n else:\n raise UnrollerError(\"backend not attached\")", "def visitExpression(self, node):\n self.set_lineno(node)\n self.scope = self.ctx.scopes[node]\n self.visit(node.node)\n self.emit('RETURN_VALUE')", "def explore_expr(expr, value, is_child):\n referenced_value = value.referenced_value()\n Explorer.explore_expr(expr, referenced_value, is_child)\n return False", "def eval(self, expression: str) ->'RDLValue':\n # Create local message handler that suppresses the usual output\n # to stderr.\n # Instead raises ValueError on any error\n msg_printer = messages.MessageExceptionRaiser()\n msg_handler = messages.MessageHandler(msg_printer)\n\n input_stream = InputStream(expression)\n\n parsed_tree = sa_systemrdl.parse(\n input_stream,\n \"eval_expr_root\",\n messages.RdlSaErrorListener(msg_handler)\n )\n\n visitor = ExprVisitor(self)\n\n # override visitor to use local message handler\n visitor.msg = msg_handler\n\n result = visitor.visit(parsed_tree)\n result.predict_type()\n return result.get_value()", "def visit(self, node):", "def visit(self, node):", "def evaluate(self):\r\n raise Exception(\"Not implemented.\")", "def evaluate(\n self, nodes, derivatives=np.array([0, 0, 0]), modes=None, unique=False\n ):", "def evaluateValue(compiled_expression):", "def test(self):\n self.eval()", "def visit_Node(self, node):\n pass", "def eval(cls, *args):\n raise NotImplementedError(\"subclasses need to override this method\")", "def evaluate(self):\n data, end = \\\n self.pat.traverse(lambda obj, *args: obj.evaluate(self.seq, *args),\n self.begin, self.data)\n return 
data", "def xpathEvalExpression(self, str):\n ret = libxml2mod.xmlXPathEvalExpression(str, self._o)\n if ret is None:raise xpathError('xmlXPathEvalExpression() failed')\n return xpathObjectRet(ret)", "def eval_expr(expr, parameter):\n expr = prepare_expr(expr, parameter)\n return eval_(ast.parse(expr, mode='eval').body)", "def test_ChangeValueTree():\n Tree = graph.oval_graph.OvalNode(1, 'operator', 'and', False, [\n graph.oval_graph.OvalNode(2, 'value', \"true\", False),\n graph.oval_graph.OvalNode(3, 'value', \"false\", False),\n graph.oval_graph.OvalNode(4, 'operator', 'or', False, [\n graph.oval_graph.OvalNode(5, 'value', \"false\", False),\n graph.oval_graph.OvalNode(6, 'value', \"true\", False)\n ]\n )\n ]\n )\n\n Tree.change_tree_value(3, \"true\")\n tests.any_test_help.any_test_treeEvaluation_with_tree(Tree, \"true\")", "def eval_graph(self, x, scope, **kwargs):\n raise NotImplementedError(\"Please implement evaluation graph\")", "def evaluate(self):\n try:\n test_val = self.expression()\n return test_val != 0\n except ValueError:\n raise ParseError(\"Could not evaluate expression.\")", "def evalBoolean(tree):\n # check if children the children is a \"or\" or a \"and\" tokken\n if (tree.children[0].data == \"or\"):\n return evalBoolean(tree.children[0].children[0]) or evalBoolean(tree.children[0].children[1])\n if (tree.children[0].data) == \"and\":\n return evalBoolean(tree.children[0].children[0]) and evalBoolean(tree.children[0].children[1])\n \n # set var1\n if(tree.children[0].data == \"integer\"):\n var1 = evalInteger(tree.children[0])\n elif(tree.children[0].data == \"variable\"):\n var1 = getValue(tree.children[0].children[0].value)\n\n # set var2\n if(tree.children[2].data == \"integer\"):\n var2 = evalInteger(tree.children[2])\n elif(tree.children[2].data == \"variable\"):\n var2 = getValue(tree.children[2].children[0].value)\n\n if(tree.children[1].children[0].data == \"greater\"):\n return var1 > var2\n if(tree.children[1].children[0].data == \"less\"):\n return var1 < var2\n if(tree.children[1].children[0].data == \"equals\"):\n return var1 == var2\n if(tree.children[1].children[0].data == \"nequal\"):\n return var1 != var2\n\n print(\"ERROR : UNEXPECTED TOKKEN\")\n return False", "def literal_eval(node_or_string):\n _safe_names = {'None': None, 'True': True, 'False': False}\n if isinstance(node_or_string, str):\n node_or_string = ast.parse(node_or_string, mode='eval')\n if isinstance(node_or_string, ast.Expression):\n node_or_string = node_or_string.body\n\n def _convert(node):\n if compat.is_ast_str(node) or compat.is_ast_num(node):\n return compat.get_ast_const(node)\n elif isinstance(node, ast.Tuple):\n return tuple(map(_convert, node.elts))\n elif isinstance(node, ast.List):\n return list(map(_convert, node.elts))\n elif isinstance(node, ast.Dict):\n return dict((_convert(k), _convert(v)) for k, v\n in zip(node.keys, node.values))\n elif isinstance(node, ast.Name):\n if node.id in _safe_names:\n return _safe_names[node.id]\n elif (isinstance(node, ast.BinOp) and\n isinstance(node.op, (ast.Add, ast.Sub)) and\n compat.is_ast_num(node.right) and\n isinstance(compat.get_ast_const(node.right), complex) and\n compat.is_ast_num(node.left) and\n isinstance(compat.get_ast_const(node.left), (int, float))): # TODO: long,\n left = compat.get_ast_const(node.left)\n right = compat.get_ast_const(node.right)\n if isinstance(node.op, ast.Add):\n return left + right\n else:\n return left - right\n raise ValueError('malformed string')\n return _convert(node_or_string)", "def 
interpret(self):\n tree = self.parser.parse()\n if tree is None:\n return ''\n return self.visit(tree)", "def eval_expr(e, store):\n if e.type == 'IntLit':\n return e.value\n elif e.type == 'IntVar':\n index = eval_expr(e.children[0], store)\n return store.get(e.value, {}).get(index, 0)\n elif e.type == 'Random':\n rg = eval_expr(e.children[0], store)\n return random.randint(0, rg-1)\n elif e.type == 'IntOp':\n lhs = eval_expr(e.children[0], store)\n rhs = eval_expr(e.children[1], store)\n if e.value == '+':\n return lhs + rhs\n elif e.value == '-':\n return lhs - rhs\n elif e.value == '*':\n return lhs * rhs\n elif e.value == '/':\n if rhs == 0:\n return 0\n else:\n return lhs // rhs\n else:\n raise NotImplementedError(e.value)", "def _Expr(self, tree):\n # Catch odd case of multi line strings and doc strings which are Expr with a Constant string type value\n if isinstance(tree.value, ast.Constant):\n if isinstance(tree.value.value, str):\n return\n # catch special case of Python 3.7 Where doc string is a Str and not a Constant\n elif isinstance(tree.value, ast.Str):\n return \n # otherwise treat like a normal expression\n self.fill()\n self.dispatch(tree.value)\n self.write(\";\")", "def ev(expr):\n return eval(expr,user_ns())", "def Eval(expression):\n # pylint: disable=eval-used\n return eval(expression)", "def evaluate(self, element):\n raise NotImplementedError", "def _simplify(parser_tree, node):\n if type(node) == Variable: # If this Node is a variable\n return False # Then this Nodes parent can't be simplified\n\n elif type(node)in (Number, Constant, ParsedFunction): # If this Node is Number or a Constant\n return True # Then this Nodes parent can be simplified\n\n else:\n for child in node.get_child_list(): # Try to simplify each child\n if not Calculator._simplify(parser_tree, child): # If one child can't be simplified\n return False # Then this Node can't be simplified as well\n\n parent = node.get_parent()\n if type(node) == Operator:\n operation = node.get_value()\n operand_0 = node.get_child(0).get_value() # Get the value of each children\n operand_1 = node.get_child(1).get_value()\n value = operation(operand_0, operand_1) # Calculate the value of this Node\n if parent is not None:\n parent.replace_child(node, Number(str(value), value, parent))\n else:\n parser_tree.set_root(Number(str(value), value, parent))\n\n elif type(node) == Function:\n function = node.get_value()\n arguments = []\n for child in node.get_child_list(): # Get the value of each child\n arguments.append(child.get_value())\n value = function(*arguments) # Calculate the value of this Node\n if parent is not None:\n parent.replace_child(node, Number(str(value), value, parent))\n else:\n parser_tree.set_root(Number(str(value), value, parent))\n\n return True", "def eval_expr(expr):\n match expr:\n case BinaryOp('+', left, right):\n return eval_expr(left) + eval_expr(right)\n case BinaryOp('-', left, right):\n return eval_expr(left) - eval_expr(right)\n case BinaryOp('*', left, right):\n return eval_expr(left) * eval_expr(right)\n case BinaryOp('/', left, right):\n return eval_expr(left) / eval_expr(right)\n case UnaryOp('+', arg):\n return eval_expr(arg)\n case UnaryOp('-', arg):\n return -eval_expr(arg)\n case VarExpr(name):\n raise ValueError(f\"Unknown value of: {name}\")\n case float() | int():\n return expr\n case _:\n raise ValueError(f\"Invalid expression value: {repr(expr)}\")", "def generic_visit(self, node: ast.AST) -> None:", "def evaluate(self, state):\n abstract", "def evalOnSubTreeEnd(self, 
node):\n\n return None", "def evaluate(self, session, *args, evaluate_data_iterator=None, **kwargs):\n\n raise NotImplementedError(\"Implement evaluate() method\")", "def test_RestrictingNodeTransformer__visit_GtE__1():\n assert restricted_eval('1 >= 3') is False", "def eval(self):\n return self.with_transforms(\"eval\")", "def test_evaluate():\n # Create a lexer instance with rules and text loaded\n lexer = lex._lexer(\n [lex_bases.rule(\"JUMP_LINE\", r\"\\n\"), lex_bases.rule(\"TEST\", r\"test\")], []\n )._load_text(\"test\")\n\n # Evalueate the loaded text and compare\n assert lexer.evaluate() == [lex_bases.token(\"TEST\", \"test\")]", "def explore_expr(expr, value, is_child):\n print (\"'%s' is a scalar value of type '%s'.\" %\n (expr, value.type))\n print (\"%s = %s\" % (expr, str(value)))\n\n if is_child:\n Explorer.return_to_parent_value_prompt()\n Explorer.return_to_parent_value()\n\n return False", "def can_reevaluate(self, node):\n return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \\\n (six.PY3 and isinstance(node, ast.Bytes)) or \\\n (ast_has_name_constant and isinstance(node, ast.NameConstant))", "def test_RestrictingNodeTransformer__visit_LtE__1():\n assert restricted_eval('1 <= 3') is True", "def evaluationFunction(problem, gFunc, hFunc, node):\n #g = getattr(searchAgents, gFunc)\n #h = getattr(searchAgents, hFunc)\n h = hFunc\n #return g(node) + h(node)\n return gFunc + h(node, problem)", "def _evaluate(self, x):\n raise NotImplementedError()", "def evaluateStructure(compiled_expression):", "def evaluator(*args, clusters: bool=True, configuration: Union[AnyStr, List[AnyStr], bool]=\"\",\n enable: bool=True, info: bool=True, name: Union[AnyStr, bool]=\"\", nodeType:\n Union[AnyStr, List[AnyStr], bool]=\"\", nodeTypeChildren: bool=True, priority:\n Union[int, bool]=0, valueName: Union[AnyStr, bool]=\"\", q=True, query=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def evaluate_node(self):\n # p, v = np.random.random(225).astype(np.float16), np.random.random()\n socket = zmq.Context().socket(zmq.DEALER)\n socket.setsockopt_string(zmq.IDENTITY, self.player_id)\n socket.connect('ipc://./tmp/oracle_%s' % self.tree.model_name)\n print('start to evaluate', self.tree.model_name)\n while True:\n # print(self.tree.to_evaluate.qsize())\n batch = []\n states = []\n colors = []\n size = self.tree.to_evaluate.qsize()\n if size > config.INFERENCE_BATCHSIZE:\n size = config.INFERENCE_BATCHSIZE\n elif size == 0:\n time.sleep(0.001)\n continue\n for _ in range(size):\n t, black, white = self.tree.to_evaluate.get()\n mine, yours = posswap(t, black, white)\n batch.append((str(mine), str(yours), t % 2))\n states.append((black, white))\n colors.append(t % 2)\n socket.send(msgpack.dumps((batch, self.player_id)))\n result = msgpack.loads(socket.recv())\n assert len(states) == len(result[0])\n assert len(states) == len(result[1])\n for ind, state in enumerate(states):\n with self.lock:\n self.tree.nodes[state].p = result[0][ind]\n if colors[ind] == 0:\n self.tree.nodes[state].v = result[1][ind]\n else:\n self.tree.nodes[state].v = -result[1][ind]\n self.tree.nodes[state].updated = True", "def _compile_node(selector):\n # To avoid precedence-related bugs, any sub-expression that is passed\n # around must be \"atomic\": add parentheses when the top-level would be\n # an operator. 
Bare literals and function calls are fine.\n\n # 1 and 0 are used for True and False to avoid global lookups.\n\n if isinstance(selector, parser.CombinedSelector):\n left_inside = _compile_node(selector.left)\n if left_inside == '0':\n return '0' # 0 and x == 0\n elif left_inside == '1':\n # 1 and x == x, but the element matching 1 still needs to exist.\n if selector.combinator in (' ', '>'):\n left = 'el.parent is not None'\n elif selector.combinator in ('~', '+'):\n left = 'el.previous is not None'\n else:\n raise SelectorError('Unknown combinator', selector.combinator)\n # Rebind the `el` name inside a generator-expressions (in a new scope)\n # so that 'left_inside' applies to different elements.\n elif selector.combinator == ' ':\n left = 'any((%s) for el in el.iter_ancestors())' % left_inside\n elif selector.combinator == '>':\n left = ('next(el is not None and (%s) for el in [el.parent])'\n % left_inside)\n elif selector.combinator == '+':\n left = ('next(el is not None and (%s) for el in [el.previous])'\n % left_inside)\n elif selector.combinator == '~':\n left = ('any((%s) for el in el.iter_previous_siblings())'\n % left_inside)\n else:\n raise SelectorError('Unknown combinator', selector.combinator)\n\n right = _compile_node(selector.right)\n if right == '0':\n return '0' # 0 and x == 0\n elif right == '1':\n return left # 1 and x == x\n else:\n # Evaluate combinators right to left:\n return '(%s) and (%s)' % (right, left)\n\n elif isinstance(selector, parser.CompoundSelector):\n sub_expressions = [\n expr for expr in map(_compile_node, selector.simple_selectors)\n if expr != '1']\n if len(sub_expressions) == 1:\n test = sub_expressions[0]\n elif '0' in sub_expressions:\n test = '0'\n elif sub_expressions:\n test = ' and '.join('(%s)' % e for e in sub_expressions)\n else:\n test = '1' # all([]) == True\n\n if isinstance(selector, parser.NegationSelector):\n if test == '0':\n return '1'\n elif test == '1':\n return '0'\n else:\n return 'not (%s)' % test\n else:\n return test\n\n elif isinstance(selector, parser.LocalNameSelector):\n return ('el.local_name == (%r if el.in_html_document else %r)'\n % (selector.lower_local_name, selector.local_name))\n\n elif isinstance(selector, parser.NamespaceSelector):\n return 'el.namespace_url == %r' % selector.namespace\n\n elif isinstance(selector, parser.ClassSelector):\n return '%r in el.classes' % selector.class_name\n\n elif isinstance(selector, parser.IDSelector):\n return 'el.id == %r' % selector.ident\n\n elif isinstance(selector, parser.AttributeSelector):\n if selector.namespace is not None:\n if selector.namespace:\n key = '(%r if el.in_html_document else %r)' % (\n '{%s}%s' % (selector.namespace, selector.lower_name),\n '{%s}%s' % (selector.namespace, selector.name),\n )\n else:\n key = ('(%r if el.in_html_document else %r)'\n % (selector.lower_name, selector.name))\n value = selector.value\n if selector.operator is None:\n return '%s in el.etree_element.attrib' % key\n elif selector.operator == '=':\n return 'el.etree_element.get(%s) == %r' % (key, value)\n elif selector.operator == '~=':\n if len(value.split()) != 1 or value.strip() != value:\n return '0'\n else:\n return (\n '%r in split_whitespace(el.etree_element.get(%s, \"\"))'\n % (value, key))\n elif selector.operator == '|=':\n return ('next(v == %r or (v is not None and v.startswith(%r))'\n ' for v in [el.etree_element.get(%s)])'\n % (value, value + '-', key))\n elif selector.operator == '^=':\n if value:\n return 'el.etree_element.get(%s, \"\").startswith(%r)' % (\n key, 
value)\n else:\n return '0'\n elif selector.operator == '$=':\n if value:\n return 'el.etree_element.get(%s, \"\").endswith(%r)' % (\n key, value)\n else:\n return '0'\n elif selector.operator == '*=':\n if value:\n return '%r in el.etree_element.get(%s, \"\")' % (value, key)\n else:\n return '0'\n else:\n raise SelectorError(\n 'Unknown attribute operator', selector.operator)\n else: # In any namespace\n raise NotImplementedError # TODO\n\n elif isinstance(selector, parser.PseudoClassSelector):\n if selector.name == 'link':\n return ('%s and el.etree_element.get(\"href\") is not None'\n % html_tag_eq('a', 'area', 'link'))\n elif selector.name == 'enabled':\n return (\n '(%s and el.etree_element.get(\"disabled\") is None'\n ' and not el.in_disabled_fieldset) or'\n '(%s and el.etree_element.get(\"disabled\") is None) or '\n '(%s and el.etree_element.get(\"href\") is not None)'\n % (\n html_tag_eq('button', 'input', 'select', 'textarea',\n 'option'),\n html_tag_eq('optgroup', 'menuitem', 'fieldset'),\n html_tag_eq('a', 'area', 'link'),\n )\n )\n elif selector.name == 'disabled':\n return (\n '(%s and (el.etree_element.get(\"disabled\") is not None'\n ' or el.in_disabled_fieldset)) or'\n '(%s and el.etree_element.get(\"disabled\") is not None)' % (\n html_tag_eq('button', 'input', 'select', 'textarea',\n 'option'),\n html_tag_eq('optgroup', 'menuitem', 'fieldset'),\n )\n )\n elif selector.name == 'checked':\n return (\n '(%s and el.etree_element.get(\"checked\") is not None and'\n ' ascii_lower(el.etree_element.get(\"type\", \"\")) '\n ' in (\"checkbox\", \"radio\"))'\n 'or (%s and el.etree_element.get(\"selected\") is not None)'\n % (\n html_tag_eq('input', 'menuitem'),\n html_tag_eq('option'),\n )\n )\n elif selector.name in ('visited', 'hover', 'active', 'focus',\n 'target'):\n # Not applicable in a static context: never match.\n return '0'\n elif selector.name == 'root':\n return 'el.parent is None'\n elif selector.name == 'first-child':\n return 'el.index == 0'\n elif selector.name == 'last-child':\n return 'el.index + 1 == len(el.etree_siblings)'\n elif selector.name == 'first-of-type':\n return ('all(s.tag != el.etree_element.tag'\n ' for s in el.etree_siblings[:el.index])')\n elif selector.name == 'last-of-type':\n return ('all(s.tag != el.etree_element.tag'\n ' for s in el.etree_siblings[el.index + 1:])')\n elif selector.name == 'only-child':\n return 'len(el.etree_siblings) == 1'\n elif selector.name == 'only-of-type':\n return ('all(s.tag != el.etree_element.tag or i == el.index'\n ' for i, s in enumerate(el.etree_siblings))')\n elif selector.name == 'empty':\n return 'not (el.etree_children or el.etree_element.text)'\n else:\n raise SelectorError('Unknown pseudo-class', selector.name)\n\n elif isinstance(selector, parser.FunctionalPseudoClassSelector):\n if selector.name == 'lang':\n tokens = [\n t for t in selector.arguments\n if t.type != 'whitespace'\n ]\n if len(tokens) == 1 and tokens[0].type == 'ident':\n lang = tokens[0].lower_value\n else:\n raise SelectorError('Invalid arguments for :lang()')\n\n return ('el.lang == %r or el.lang.startswith(%r)'\n % (lang, lang + '-'))\n else:\n if selector.name == 'nth-child':\n count = 'el.index'\n elif selector.name == 'nth-last-child':\n count = '(len(el.etree_siblings) - el.index - 1)'\n elif selector.name == 'nth-of-type':\n count = ('sum(1 for s in el.etree_siblings[:el.index]'\n ' if s.tag == el.etree_element.tag)')\n elif selector.name == 'nth-last-of-type':\n count = ('sum(1 for s in el.etree_siblings[el.index + 1:]'\n ' if 
s.tag == el.etree_element.tag)')\n else:\n raise SelectorError('Unknown pseudo-class', selector.name)\n\n result = parse_nth(selector.arguments)\n if result is None:\n raise SelectorError(\n 'Invalid arguments for :%s()' % selector.name)\n a, b = result\n # x is the number of siblings before/after the element\n # Matches if a positive or zero integer n exists so that:\n # x = a*n + b-1\n # x = a*n + B\n B = b - 1\n if a == 0:\n # x = B\n return '%s == %i' % (count, B)\n else:\n # n = (x - B) / a\n return ('next(r == 0 and n >= 0'\n ' for n, r in [divmod(%s - %i, %i)])'\n % (count, B, a))\n\n else:\n raise TypeError(type(selector), selector)" ]
[ "0.7579689", "0.7281968", "0.7104537", "0.65870726", "0.63241583", "0.62537116", "0.62466973", "0.6190615", "0.6190615", "0.6085275", "0.60787815", "0.6074576", "0.60320526", "0.6001459", "0.59816545", "0.5969225", "0.5951202", "0.5924543", "0.59197736", "0.5908411", "0.590691", "0.5902896", "0.59014523", "0.58876306", "0.58724046", "0.5871776", "0.5855741", "0.58469516", "0.58241343", "0.5796532", "0.5796532", "0.579045", "0.57842135", "0.576696", "0.5755464", "0.5755464", "0.5755464", "0.57472163", "0.57410663", "0.57252705", "0.5717589", "0.57167345", "0.57136077", "0.5704738", "0.57037693", "0.56991357", "0.5696022", "0.5681736", "0.56769156", "0.5673374", "0.5665556", "0.5664134", "0.56571466", "0.56542015", "0.56462425", "0.5646112", "0.563685", "0.5632326", "0.56233805", "0.5608474", "0.55705154", "0.55705154", "0.5564843", "0.556396", "0.5550882", "0.5544444", "0.55433476", "0.55093193", "0.5508242", "0.55040765", "0.5502839", "0.54944485", "0.54833895", "0.54814506", "0.5478511", "0.54770726", "0.5475873", "0.54732525", "0.54699254", "0.5454667", "0.54338616", "0.5430812", "0.5425954", "0.5422689", "0.54141486", "0.54095024", "0.5407988", "0.54008734", "0.539845", "0.53973556", "0.53862333", "0.5371606", "0.5370335", "0.53663653", "0.53527117", "0.53497934", "0.53452724", "0.5338936", "0.53384894", "0.53252524" ]
0.63417566
4
Warn about unused static variables.
def _find_unused_static_warnings(filename, lines, ast_list):
    # Note: 'ast' here is the project's own C/C++ declaration AST module
    # (it provides VariableDeclaration, Function and Class nodes), not
    # Python's built-in ast.

    # Collect every variable declaration whose type carries the 'static' modifier.
    static_declarations = {
        node.name: node
        for node in ast_list
        if (isinstance(node, ast.VariableDeclaration) and
            'static' in node.type.modifiers)
    }

    def find_variables_use(body):
        # Tally each reference to one of the collected static declarations.
        for child in body:
            if child.name in static_declarations:
                static_use_counts[child.name] += 1

    static_use_counts = collections.Counter()
    # Walk free functions and class methods, counting uses of each static.
    for node in ast_list:
        if isinstance(node, ast.Function) and node.body:
            find_variables_use(node.body)
        elif isinstance(node, ast.Class) and node.body:
            for child in node.body:
                if isinstance(child, ast.Function) and child.body:
                    find_variables_use(child.body)

    # Report every static declaration that was never referenced.
    for name in sorted(static_declarations):
        if not static_use_counts[name]:
            print("{}:{}: unused variable '{}'".format(
                filename, lines.get_line_number(static_declarations[name].start),
                name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = nones.union(set(filter(lambda x: str(self.__dict__[x]) == \"\", clsvars)))\n unused = clsvars - variables - exceptions - nones\n return unused", "def check_for_unused_names(self):\n for s in self.unused_names:\n self.warning(\"'%s' is unused.\"%s)\n\n# warns for param that specified with -c (but also if name gets defined in __main__,\n# e.g. by default_density=global_params.default_density in a script file\n## for name in self.params():\n## if name in self.context:\n## self.warning(\"'%s' still exists in global_params.context\"%name)\n\n # detect duplicate param value that wasn't used (e.g. specified with after script)\n for name,val in self.params().items():\n if name in self.context:\n if self.context[name]!=self.inspect_value(name):\n self.warning(\"'%s=%s' is unused.\"%(name,self.context[name]))", "def init_warnings():\n warnings.simplefilter(\"ignore\", category=AstropyWarning)", "def _set_var_ignore(self):\n self._var_ignore = [k for k in self.__dict__.keys() if k[0] != '_']", "def warn():\n pass", "def _suppress_warnings():\n import warnings\n import sys\n import os\n if os.path.basename(sys.argv[0]) != \"trial\":\n warnings.simplefilter(\"ignore\")", "def test_no_var_init(self):\n self._test_reports_helper({\"--no-var-init-profiling\": \"\"},\n [\"report.txt\"])", "def warning(self, *args, **kwargs):", "def test_instances(self):\n\n @deprecate(bar=\"use baz instead\")\n def foo(bar=None, baz=None):\n pass\n\n @deprecate(baz=\"use bar instead\")\n def food(bar=None, baz=None):\n pass\n\n with warnings.catch_warnings(record=True) as w:\n foo(bar=True)\n food(baz=True)\n self.assertEqual(len(w), 2, \"Not all warnings preserved.\")", "def filter_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n warnings.simplefilter(\"ignore\", category=LightningDeprecationWarning)", "def log_unused(self, error=True):\n have_unused = False\n log = get_logger().error if error else get_logger().info\n for name in self._all_names:\n current_set = getattr(self, name, None)\n if current_set:\n log('Unused from %s: %s', name.upper(), current_set)\n have_unused = True\n return have_unused", "def skip_require():\n global ignore_once\n ignore_once = True", "def no_additional_complaints() -> None:\n logging.getLogger(\"asyncio\").setLevel(\"CRITICAL\")\n warnings.simplefilter(\"ignore\")", "def verif_unused(sv):\r\n if Unused in sv.Object and sv.Object[Unused].value: # check presence and integrity of unused list\r\n unusedlist=[applied (x, Unused) for x in sv.Object[Unused].value]\r\n for nam in unusedlist: # check each unused declaration\r\n nod=sv.Object[nam]\r\n if sv.Namedpinlist.get(nam)==[nod.effects]: continue # pin is just named\r\n elif applied(nam, Output):\r\n if len(nod.effects)==1: # only effect is output list\r\n if len(nod.causes)<=2: continue\r\n if len(nod.causes)<=4 and Faux in nod.causes and Ewent in nod.causes: continue # allow 'take event'\r\n elif nod.causes or nod.effects: # object should have no cause and no effect\r\n print(Err_unused_obj) \r\n print(str(nam))\r\n sv.Current_clause=None, None, None\r\n raise ReferenceError", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def 
log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def warnings():\n return THE_LOGGER.warnings", "def unusedFromKDOTDataPreparation():", "def _var_check(self):\n missing = set()\n for v in self.variables:\n if getattr(self, v) is None:\n missing.add(v)\n self.missing = missing", "def check_unused_attributes(self):\n all_attrs_read = collections.defaultdict(set)\n\n def _add_attrs(typ, attr_names_read):\n if typ is None:\n return\n all_attrs_read[typ] |= attr_names_read\n for base_cls in typ.__bases__:\n all_attrs_read[base_cls] |= attr_names_read\n if isinstance(typ, type):\n for child_cls in qcore.inspection.get_subclass_tree(typ):\n all_attrs_read[child_cls] |= attr_names_read\n\n for serialized, attrs_read in six.iteritems(self.attributes_read):\n attr_names_read = {attr_name for attr_name, _, _ in attrs_read}\n _add_attrs(self.unserialize_type(serialized), attr_names_read)\n\n for typ, attrs in self.config.IGNORED_UNUSED_ATTRS_BY_CLASS:\n _add_attrs(typ, attrs)\n\n used_bases = tuple(self.config.USED_BASE_CLASSES)\n\n for typ, attrs_read in sorted(\n six.iteritems(all_attrs_read), key=self._cls_sort\n ):\n if self.serialize_type(typ) not in self.classes_examined or issubclass(\n typ, used_bases\n ):\n continue\n existing_attrs = set(typ.__dict__.keys())\n for attr in existing_attrs - attrs_read - self.config.IGNORED_UNUSED_ATTRS:\n # server calls will always show up as unused here\n if _safe_getattr(_safe_getattr(typ, attr, None), \"server_call\", False):\n continue\n print(\"Unused method: %r.%s\" % (typ, attr))", "def warnings(self, d):\n\n if d['filter_nu'] == 220e9:\n if d['beam_shape'] == 'gaussian':\n warnings.warn('The nu dependency of the gausian beam FWHM '\n 'is not a good approximation in the 220 GHz band.')\n elif d['beam_shape'] == 'fitted_beam':\n warnings.warn('Beam and solid angle frequency dependence implementation '\n 'in the 220 GHz band for the fitted beam does not correctly describe '\n 'the true behavior')", "def has_warnings(self) -> bool:", "def test_deprecated_private_variables(attr):\n with pytest.warns(AstropyDeprecationWarning):\n resolve_name(\"astropy\", \"cosmology\", \"flrw\", attr)", "def _warn(msg):\n warnings.warn(msg, TessyWarning, stacklevel=3)", "def warning(self, msg, *args, **kwargs):\n pass", "def is_not_used(self):\n pass", "def ignore_warnings(my_func):\n\n def wrapper(self, *args, **kwargs):\n \"\"\"\n This is where the warning suppression occurs.\n \"\"\"\n if sys.version_info >= (3, 2):\n warnings.simplefilter(\"ignore\", ResourceWarning)\n with warnings.catch_warnings():\n my_func(self, *args, **kwargs)\n\n return wrapper", "def static_check(self):\n self._processor.static_check(self)", "def warningglobal(self, *args, **kwargs):\n return self.logger.log(logging.WARNING+1, *args, **kwargs)", "def check_unused_args(self, used_args, args, kwargs):\n for k, v in kwargs.items():\n if k in used_args:\n self._used_kwargs.update({k: v})\n else:\n self._unused_kwargs.update({k: v})", "def _check_function_unused_vars(self, scope, args, enclosing_statement=None):\n all_def_nodes = set(\n chain.from_iterable(scope.name_to_all_definition_nodes.values())\n )\n all_used_def_nodes = set(\n chain.from_iterable(scope.usage_to_definition_nodes.values())\n )\n arg_nodes = set(args)\n all_unused_nodes = all_def_nodes - all_used_def_nodes\n for unused in all_unused_nodes:\n # Ignore names not defined through a Name node (e.g., some function arguments)\n if 
not isinstance(unused, ast.Name):\n continue\n # Ignore names that are meant to be ignored\n if unused.id.startswith(\"_\"):\n continue\n # Ignore arguments\n if unused in arg_nodes:\n continue\n # Ignore names involved in global and similar declarations\n if unused.id in scope.accessed_from_special_nodes:\n continue\n replacement = None\n if self._name_node_to_statement is not None:\n # Ignore some names defined in unpacking assignments. This should behave as follows:\n # a, b = c() # error only if a and b are both unused\n # a, b = yield c.asynq() # same\n # a, b = yield (func1.asynq(), func2.asynq()) # error if either a or b is unused\n # [None for i in range(3)] # error\n # [a for a, b in pairs] # no error\n # [None for a, b in pairs] # error\n statement = self._name_node_to_statement[unused]\n if isinstance(statement, ast.Assign):\n # it's an assignment\n if not (\n isinstance(statement.value, ast.Yield)\n and isinstance(statement.value.value, ast.Tuple)\n ):\n # but not an assignment originating from yielding a tuple (which is probably an\n # async yield)\n\n # We need to loop over the targets to handle code like \"a, b = c = func()\". If\n # the target containing our unused variable is a tuple and some of its members\n # are not unused, ignore it.\n partly_used_target = False\n for target in statement.targets:\n if (\n isinstance(target, (ast.List, ast.Tuple))\n and _contains_node(target.elts, unused)\n and not _all_names_unused(target.elts, all_unused_nodes)\n ):\n partly_used_target = True\n break\n if partly_used_target:\n continue\n if len(statement.targets) == 1 and not isinstance(\n statement.targets[0], (ast.List, ast.Tuple)\n ):\n replacement = self.remove_node(unused, statement)\n elif isinstance(statement, ast.comprehension):\n if isinstance(statement.target, ast.Tuple):\n if not _all_names_unused(\n statement.target.elts, all_unused_nodes\n ):\n continue\n else:\n replacement = self.replace_node(\n unused,\n ast.Name(id=\"_\", ctx=ast.Store()),\n enclosing_statement,\n )\n elif hasattr(ast, \"AnnAssign\") and isinstance(statement, ast.AnnAssign):\n # (Python 3.6+ only) ignore assignments in AnnAssign nodes, which don't actually\n # bind the name\n continue\n self._show_error_if_checking(\n unused,\n \"Variable {} is not read after being written to\".format(unused.id),\n error_code=ErrorCode.unused_variable,\n replacement=replacement,\n )", "def variables_used (self) :\r\n\t\treturn []", "def free_variables(self):\n\n free_vars = set()\n self.free_variables_helper(free_vars)\n return free_vars\n # Task 7.6", "def has_warnings_active(self) -> bool:", "def static():\n pass", "def static():\n pass", "def warning(*args, noContext: bool=True, showLineNumber: bool=True, **kwargs)->None:\n pass", "def test_unexpected_field_warning(shutdown_only):\n ray.init(runtime_env={\"unexpected_field\": \"value\"})\n\n @ray.remote\n def f():\n return True\n\n # Run a task to trigger runtime_env creation.\n assert ray.get(f.remote())\n\n # Check that the warning is logged.\n session_dir = ray._private.worker.global_worker.node.address_info[\"session_dir\"]\n log_path = Path(session_dir) / \"logs\"\n\n # Check that a warning appears in some \"runtime_env_setup*.log\"\n wait_for_condition(\n lambda: any(\n \"unexpected_field is not recognized\" in open(f).read()\n for f in log_path.glob(\"runtime_env_setup*.log\")\n )\n )", "def warning(self, *args, **kwargs): # real signature unknown\n pass", "def get_warnings(self):\n pass", "def test_warning_with_no_api_key(self):\n with 
warnings.catch_warnings(record=True) as w:\n Yandex()\n self.assertEqual(len(w), 1)", "def warning(self, warning):\n pass", "def get_unused_indices(program):\n used = get_used_indices(program)\n all_indices = set(range(len(program.var_types) - 1))\n return all_indices - used", "def suppressWarningClass(clazz):\n _enabled.insert(0, (clazz, 0))", "def _enabled_warnings(self):\n with warnings.catch_warnings():\n if self.warnings:\n # if self.warnings is set, use it to filter all the warnings\n warnings.simplefilter(self.warnings)\n # if the filter is 'default' or 'always', special-case the\n # warnings from the deprecated unittest methods to show them\n # no more than once per module, because they can be fairly\n # noisy. The -Wd and -Wa flags can be used to bypass this\n # only when self.warnings is None.\n if self.warnings in ['default', 'always']:\n warnings.filterwarnings(\n 'module',\n category=DeprecationWarning,\n message=r'Please use assert\\w+ instead.')\n yield", "def tearDown(self):\n warnings.resetwarnings()", "def noCheck():\n dislin.nochek()", "def warnings(self) -> List[Error]:", "def reset_vo_warnings():\n from . import converters, xmlutil\n\n # -----------------------------------------------------------#\n # This is a special variable used by the Python warnings #\n # infrastructure to keep track of warnings that have #\n # already been seen. Since we want to get every single #\n # warning out of this, we have to delete all of them first. #\n # -----------------------------------------------------------#\n for module in (converters, exceptions, tree, xmlutil):\n try:\n del module.__warningregistry__\n except AttributeError:\n pass", "def get_public_variables(t):\n return [i[0] for i in\n inspect.getmembers(t, lambda i:not inspect.isroutine(i))\n if not i[0].startswith(\"__\")]", "def test_cclwarning_not_equal():\n w = pyccl.CCLWarning(\"blah\")\n w2 = pyccl.CCLWarning(\"blahh\")\n assert w is not w2\n assert w != w2\n assert hash(w) != hash(w2)\n\n v = pyccl.CCLDeprecationWarning(\"blah\")\n v2 = pyccl.CCLDeprecationWarning(\"blahh\")\n assert v is not v2\n assert v != v2\n assert hash(v) != hash(v2)", "def calculate_diagnostic_vars(self):\n pass", "def _filterls(locals):\n for var in locals.keys():\n if var.startswith(\"_\"): del locals[var]\n return locals", "def ignore_python_warnings(function):\n\n @functools.wraps(function)\n def wrapped(*args, **kwargs):\n \"\"\"\n Wrapped function.\n \"\"\"\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n return function(*args, **kwargs)\n\n return wrapped", "def disableIncorrectNameWarning(*args, **kwargs)->None:\n pass", "def get_unused_kwargs(self):\n return self._unused_kwargs", "def warnings_active(self) -> List[Error]:", "def report_unused_cycle_suppressions(self, reporter):\n for module in self.get_modules():\n for dep in module.get_dependencies():\n if not dep.suppression_used:\n reporter.cyclic_issue(\"unused cycle suppression: {0} -> {1}\".format(module.get_name()[7:], dep.get_other_module().get_name()[7:]))", "def log_check_warnings(self):\n self._log_check_warnings_object(self._info)\n self._log_check_warnings_object(self._tags)\n self._log_check_warnings_object(self._schemes)\n self._log_check_warnings_object(self._paths)\n self._log_check_warnings_object(self._securityDefinitions)\n self._log_check_warnings_object(self._definitions)\n pass", "def get_unused_annotations(graph):\n return graph.defined_annotation_keywords - get_annotations(graph)", "def is_ignored(self):", "def 
test_spidermonkey_warning():\n # The following is attempting to store the octal \"999999999999\" in x, but\n # this is an invalid octal obviously. We need to \"use strict\" here because\n # the latest versions of spidermonkey simply accept that as a base 10\n # number, despite the \"0\" prefix.\n # We need spidermonkey to choke on this code, and this test makes sure that\n # when spidermonkey does, it doesn't break the validator.\n assert _do_test_raw(\"\"\"\n \"use strict\";\n var x = 0999999999999;\n \"\"\").failed()", "def allow_unresolved_variable_tokens(self):\n return self._allow_unresolved_variable_tokens", "def ignore_builtin_verification():\n return not current_space().skip_builtin_verification", "def lint():\n toolkit.lint(exclude=[\"__init__.py\"])", "def notice(self, warning):\n pass", "def silence_requests_warnings():\n requests.packages.urllib3.disable_warnings()", "def init_vars(self):\n # type: () -> None\n raise NotImplementedError", "def __init__(self, *unused_args, **unused_kwargs):", "def exported(*variables):\n ignored(variables)", "def warning(self, msg):\r\n self.logger.warning(msg)", "def fix_static_global_kernels(in_txt):\n in_txt = in_txt.replace(\" __global__ static\", \"__global__\")\n return in_txt", "def _suppressGoogleLogWarning():\n\n try:\n # Tensorflow uses Google's abseil-py library, which uses a Google-specific\n # wrapper for logging. That wrapper will write a warning to sys.stderr if\n # the Google command-line flags library has not been initialized.\n #\n # https://github.com/abseil/abseil-py/blob/pypi-v0.7.1/absl/logging/__init__.py#L819-L825\n #\n # We don't want this here because we have our own logging setup.\n import absl.logging\n\n # https://github.com/abseil/abseil-py/issues/99\n logging.root.removeHandler(absl.logging._absl_handler)\n # https://github.com/abseil/abseil-py/issues/102\n absl.logging._warn_preinit_stderr = False\n except Exception:\n pass", "def get_all_variables(instance):\n return [v for v in dir(instance) if not callable(getattr(instance, v))]", "def _filter_info_warning(lines):\n lines = list(filter(lambda x: 'RuntimeWarning' not in x, lines))\n return lines", "def _strict_warning(self):\n if self.options.get('strict', True):\n return ('Strict mode enabled (the default), so this could be due to an '\n 'integer key, such as an HTTP status code.')\n return ('Strict mode disabled. 
Prance cannot help you narrow this further '\n 'down, sorry.')", "def global_check(self):\n return None", "def allow_warnings(self):\n return self._allow_warnings", "def checking_without_helper(cls):\n cls._checking_need_helper_ = False\n return cls", "def test_debug_ok_with_not_whitelisted_keyword():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.debug(\n \"Hello {goodbye}!\",\n extra=dict(\n goodbye=\"{}\",\n ),\n )\n logging.info(\n \"Hello {hello}!\",\n extra=dict(\n hello=\"{}\",\n ),\n )\n \"\"\"))\n whitelist = Whitelist(group=\"logging.extra.example\")\n visitor = LoggingVisitor(whitelist=whitelist)\n visitor.visit(tree)\n\n assert_that(whitelist, contains(\"world\"))\n assert_that(visitor.violations, has_length(1))\n assert_that(visitor.violations[0][1], is_(equal_to(WHITELIST_VIOLATION.format(\"hello\"))))", "def dummy_function(verbose = True, *args, **kwargs):\n if verbose: print(__name__)\n warnings.simplefilter('always')\n print(args)\n print(kwargs)\n\n\n # warnings.warn(\"WARNING\")\n\n if not args and not kwargs:\n warnings.warn( \"You didn't pass any *args or **kwargs\", RuntimeWarning)\n\n else:\n if args:\n for i, arg in enumerate(args):\n print('an arg passed via *args: ', repr(arg))\n else:\n warnings.warn( \"You didn't pass any *args\", RuntimeWarning)\n\n if kwargs:\n for key, value in kwargs.items():\n print('a **kwarg: ', repr(key), ' == ' , repr(value))\n else:\n warnings.warn( \"You didn't pass any **kwargs\", RuntimeWarning)\n pass", "def __init__(self):\n FooBar = None\n Foo = None\n FOO = None\n foo_bar = None", "def test_extra_with_not_whitelisted_keyword():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.info(\n \"Hello {hello}!\",\n extra=dict(\n hello=\"{}\",\n ),\n )\n \"\"\"))\n whitelist = Whitelist(group=\"logging.extra.example\")\n visitor = LoggingVisitor(whitelist=whitelist)\n visitor.visit(tree)\n\n assert_that(whitelist, contains(\"world\"))\n assert_that(visitor.violations, has_length(1))\n assert_that(visitor.violations[0][1], is_(equal_to(WHITELIST_VIOLATION.format(\"hello\"))))", "def dont_track(obj):\n obj._dont_track = True\n return obj", "def test_created():\n assert len(dir(constants)) > 300\n assert hasattr(constants, \"Planck_constant\") == True", "def test_debug_prefix_ok_with_not_whitelisted_keyword():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.info(\n \"Hello {debug_hello}!\",\n extra=dict(\n debug_hello=\"{}\",\n ),\n )\n \"\"\"))\n whitelist = Whitelist(group=\"logging.extra.example\")\n visitor = LoggingVisitor(whitelist=whitelist)\n visitor.visit(tree)\n\n assert_that(whitelist, contains(\"world\"))\n assert_that(visitor.violations, is_(empty()))", "def test_warnings():\n tree = parse(dedent(\"\"\"\\\n import warnings\n\n warnings.warn(\"Hello World!\")\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, is_(empty()))", "def __init__(self, message=\"Undefned AssertionError\"):\n config.log.critical(\"%s\" % (message))", "def go_quiet():\n my_logger = logging.getLogger(__name__)\n my_logger.setLevel(LEVELS['WARNING'])", "def clearWarnings():\n for name, mod in list(sys.modules.items()):\n try:\n reg = getattr(mod, \"__warningregistry__\", None)\n except ImportError:\n continue\n if reg:\n reg.clear()", "def warning(self, msg):\n self.__logger.warning(msg)", "def get_warning(self) -> List[str]:\n return []", "def get_warning(self) -> List[str]:\n return []", "def warning(self, msg, *args, **kwargs):\n self._log(\"WARNING\", msg, args, kwargs)" ]
[ "0.6999553", "0.684138", "0.61850595", "0.6036887", "0.58252496", "0.57561684", "0.5704306", "0.56855494", "0.56610996", "0.56531423", "0.5616008", "0.55835503", "0.5551007", "0.55266124", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.5493492", "0.548651", "0.5421703", "0.54148483", "0.53651136", "0.53480744", "0.5334751", "0.53262484", "0.5315057", "0.53142375", "0.52850854", "0.52843624", "0.528383", "0.52603155", "0.5255535", "0.5228435", "0.52185", "0.51844734", "0.518144", "0.518144", "0.51700956", "0.515811", "0.5141937", "0.5135551", "0.5135267", "0.51099885", "0.5100723", "0.50907254", "0.5051441", "0.503209", "0.5032024", "0.5031712", "0.5018265", "0.5014235", "0.5005558", "0.50052774", "0.49955183", "0.49834967", "0.49602237", "0.49456438", "0.4940326", "0.49345353", "0.49256006", "0.49231187", "0.49065137", "0.48946315", "0.4890521", "0.48674095", "0.48323467", "0.48305577", "0.48289767", "0.48273262", "0.4813594", "0.48113966", "0.48070735", "0.48068082", "0.48067984", "0.48025396", "0.47964704", "0.47825873", "0.4775762", "0.4772168", "0.47662753", "0.4763363", "0.4762532", "0.4753901", "0.4749228", "0.4741063", "0.4738334", "0.47338971", "0.47225258", "0.47212413", "0.4720154", "0.47183192", "0.4717647", "0.47137764", "0.47137764", "0.4705771" ]
0.7391628
0
Implemented the validation step in training.
def validate(self): # start validate self.model.eval() preds, labels = [], [] for batch_idx, data in enumerate(self.valid_dataloader): # calculate and log losses losses_report, valid_preds, valid_labels = self.forward_one_batch( data) self._update_losses(losses_report, train=False) preds.append(valid_preds) labels.append(valid_labels) preds = np.concatenate(preds, axis=0) labels = np.concatenate(labels, axis=0) if IS_REG: preds = disc(preds) # calculate and log metrics metrics_report = self.evaluate_metrics(preds, labels) self._update_metrics(metrics_report, train=False) # TODO: lr scheduler step setting self.lr_scheduler.step(self.valid_loss_meters['CrossEntropyLoss'].avg) # end validate self.model.train()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validation_epoch(self):\n self.model.eval()\n\n # Compute for training set\n train_loss, train_acc = compute_loss_and_accuracy(\n self.dataloader_train, self.model, self.loss_criterion\n )\n self.TRAIN_ACC.append(train_acc)\n self.TRAIN_LOSS.append(train_loss)\n\n # Compute for validation set\n validation_loss, validation_acc = compute_loss_and_accuracy(\n self.dataloader_val, self.model, self.loss_criterion\n )\n self.VALIDATION_ACC.append(validation_acc)\n self.VALIDATION_LOSS.append(validation_loss)\n print(\"Current validation loss:\", validation_loss, \" Accuracy:\", validation_acc)\n # Compute for testing set\n test_loss, test_acc = compute_loss_and_accuracy(\n self.dataloader_test, self.model, self.loss_criterion\n )\n self.TEST_ACC.append(test_acc)\n self.TEST_LOSS.append(test_loss)\n\n self.model.train()", "def validation_step(self):\n # NO NEED TO CHANGE THIS FUNCTION\n logits = self.model.forward(self.X_val)\n loss = cross_entropy_loss(Y_val, logits)\n\n accuracy_train = calculate_accuracy(\n X_train, Y_train, self.model)\n accuracy_val = calculate_accuracy(\n X_val, Y_val, self.model)\n return loss, accuracy_train, accuracy_val", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def train_with_validation_provided(self, features, labels, validation_features, validation_labels):\n pass", "def _perform_validation(self):\n # -- Extract the information of the current fold -- #\n trained_on_folds = self.already_trained_on[str(self.fold)]\n\n # -- Extract all tasks into a list to loop through -- #\n tasks = list(self.mh_network.heads.keys())\n\n # -- Add the current trainer_class name to prev_trainer, so the loop does not end in an error -- #\n # -- since this trainer is not yet a prev_trainer.. --> Remove the trainer again after the loop -- #\n # -- because this creates only a view and changes self.already_trained_on as well which we do not want to -- #\n trained_on_folds['prev_trainer'].append(self.trainer_class_name)\n \n # -- NOTE: Since the head is an (ordered) ModuleDict, the current task is the last head, so there -- #\n # -- is nothing to restore at the end. 
-- #\n # -- NOTE: Since the current task the model is training on is always added at the end of the list, -- #\n # -- After this loop everything is automatically set as before, so no restoring needs to be done -- #\n # -- For each previously trained task perform the validation on the full validation set -- #\n running_task_list = list()\n for idx, task in enumerate(tasks):\n # -- Update running task list and create running task which are all (trained tasks and current task joined) for output folder name -- #\n running_task_list.append(task)\n running_task = join_texts_with_char(running_task_list, '_')\n\n # -- Get default configuration for nnunet/nnunet_ext model (finished training) -- #\n plans_file, _, self.dataset_directory, _, stage, \\\n _ = get_default_configuration(self.network_name, task, running_task, trained_on_folds['prev_trainer'][idx],\\\n self.tasks_joined_name, self.identifier, extension_type=self.extension)\n\n # -- Load the plans file -- #\n self.plans = load_pickle(plans_file)\n\n # -- Extract the folder with the preprocessed data in it -- #\n self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +\n \"_stage%d\" % stage)\n \n # -- Create the corresponding dataloaders for train ind val (dataset loading and split performed in function) -- #\n # -- Since we do validation, there is no need to unpack the data -- #\n self.dl_tr, self.dl_val = self.get_basic_generators()\n\n # -- Load the dataset for the task from the loop and perform the split on it -- #\n #self.dataset = load_dataset(folder_with_preprocessed_data)\n #self.do_split()\n\n # -- Extract corresponding self.val_gen --> the used function is extern and does not change any values from self -- #\n self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val, # Changed due to do_split ;)\n self.data_aug_params[\n 'patch_size_for_spatialtransform'],\n self.data_aug_params,\n deep_supervision_scales=self.deep_supervision_scales,\n pin_memory=self.pin_memory,\n use_nondetMultiThreadedAugmenter=False)\n # -- Update the log -- #\n self.print_to_log_file(\"Performing validation with validation data from task {}.\".format(task))\n\n # -- Activate the current task to train on the right model -- #\n # -- Set self.network, since the parent classes all use self.network to train -- #\n # -- NOTE: self.mh_network.model is also updated to task split ! 
-- #\n self.network = self.mh_network.assemble_model(task)\n \n # -- For evaluation, no gradients are necessary so do not use them -- #\n with torch.no_grad():\n # -- Put current network into evaluation mode -- #\n self.network.eval()\n # -- Run an iteration for each batch in validation generator -- #\n for _ in range(self.num_val_batches_per_epoch):\n # -- Run iteration without backprop but online_evaluation to be able to get TP, FP, FN for Dice and IoU -- #\n _ = self.run_iteration(self.val_gen, False, True)\n \n # -- Calculate Dice and IoU --> self.validation_results is already updated once the evaluation is done -- #\n self.finish_online_evaluation_extended(task)\n\n # -- Remove the trainer now from the list again -- #\n trained_on_folds['prev_trainer'] = trained_on_folds['prev_trainer'][:-1]\n\n # -- Save the dictionary as json file in the corresponding output_folder -- #\n save_json(self.validation_results, join(self.output_folder, 'val_metrics.json'))\n\n # -- Update already_trained_on if not already done before -- #\n if not self.already_trained_on[str(self.fold)]['val_metrics_should_exist']:\n # -- Set to True -- #\n self.already_trained_on[str(self.fold)]['val_metrics_should_exist'] = True\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))", "def _validate_training_process(self, sess, epoch):\n logger.info('Epoch %d: validating training process ...' % epoch)\n \n if self.val_cpu_only:\n logger.warn('The option \\'val_cpu_only\\' is enabled, but not ' + \\\n 'supported by this class. Option will be ignored.')\n\n val_handle = sess.run(self._val_iter.string_handle())\n sess.run(self._val_iter.initializer,\n feed_dict={self._t_val_raw_in: self._val_batch[0],\n self._t_val_raw_out: self._val_batch[1],\n self._t_val_batch_size: self._val_batch[0].shape[0]})\n\n mi_estimate, mi_real, summary = sess.run( \\\n [self._t_mi, self._t_real_mi, self._t_summaries],\n feed_dict={self._t_handle: val_handle,\n self._t_mi_known: True,})\n\n logger.info('Real MI: %f' % mi_real)\n logger.info('Estimated MI on validation batch: %f' % mi_estimate)\n\n self._val_summary_writer.add_summary(summary, epoch)\n self._val_summary_writer.flush()\n\n logger.info('Epoch %d: validating training process ... 
Done' % epoch)", "def _validate(self, global_step):\n # Todo clean summaries and add example outputs\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=global_step)\n del validation_out['summary_ops']\n return validation_out", "def run_validation_step(self):\n if (self.iter % self.cfg.VALIDATION_PERIOD) == 0:\n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n# val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)", "def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n total_loss = losses.meters['loss_x'].avg\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n # if full_results:\n return [total_loss,losses.dict_results(),results]\n # return total_loss", "def validate(self):\n X_orig = make_X_from_features(self._conf)\n train_sz = len(load_array(self._conf, 'task.dataset.id_train'))\n X = X_orig[:train_sz, :]\n y = load_array(self._conf, 'task.dataset.y_train')\n y = y.reshape(y.size)\n\n cv_method_name = self._conf['task']['params']['validation']['class']\n cv_params_name = self._conf['task']['params']['validation'].get(\n 'params', {})\n cv_params_name = _to_str_value(cv_params_name)\n\n cv_method = dynamic_load(cv_method_name)\n mean_cv_score = cv_method(X, y, self, **cv_params_name)\n\n task_metrics = self._conf['task']['params']['metrics']\n task_method = task_metrics['method']\n\n ume.db.add_validation_score(\n os.path.basename(self._jn),\n ume.__version__,\n task_method,\n mean_cv_score)", "def test_training(self):\n\t\tpass", "def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.training_forward(x)\n loss = self.loss_function(y_hat, y)\n if self.project_parameters.loss_function == 'BCELoss':\n val_step_accuracy = self.accuracy(y_hat.argmax(-1), y.argmax(-1))\n elif self.project_parameters.loss_function == 'CrossEntropyLoss':\n val_step_accuracy = self.accuracy(F.softmax(y_hat, dim=-1), y)\n return {'loss': loss, 'accuracy': val_step_accuracy}", "def validation_step(self, *args: Any, **kwargs: Any) -> None:\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if (\n batch_idx < 10\n and hasattr(self.trainer, \"datamodule\")\n and self.logger\n and hasattr(self.logger, \"experiment\")\n and hasattr(self.logger.experiment, \"add_figure\")\n ):\n try:\n datamodule = self.trainer.datamodule\n 
batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = datamodule.plot(sample)\n summary_writer = self.logger.experiment\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n plt.close()\n except ValueError:\n pass", "def _doValidation(self, val_dl: torch.utils.data.DataLoader):\n\n # Initialize variables for tracking loss, correct predictions, total samples, and labels\n val_loss = 0.0\n correct = 0\n total = 0\n true_labels = []\n pred_labels = []\n\n # Set the model to evaluation mode (disables gradient computation and dropout)\n self.eval()\n\n # Disable gradient tracking for efficiency\n with torch.no_grad():\n # Iterate over the validation data loader\n for x_batch, y_batch in val_dl:\n # Forward pass to obtain model predictions\n y_pred = self.forward(x_batch)\n # Compute the loss between the predictions and the ground truth\n loss = self.criterion(y_pred, y_batch)\n val_loss += loss.item()\n\n # Get the predicted labels by selecting the maximum value along the second dimension\n _, predicted = torch.max(y_pred.data, 1)\n # Update the count of total samples and correct predictions\n total += y_batch.size(0)\n correct += (predicted == y_batch).sum().item()\n\n # Extend the true and predicted labels lists\n true_labels.extend(y_batch.tolist())\n pred_labels.extend(predicted.tolist())\n\n # Compute the average validation loss\n val_loss /= len(val_dl)\n # Calculate the weighted F1 score for the true and predicted labels\n val_f1 = f1_score(true_labels, pred_labels, average='weighted') * 100\n\n # Return the validation loss, F1 score, true labels, and predicted labels\n return val_loss, val_f1, true_labels, pred_labels", "def validation_step(self, batch, running_val_data):\n log.warning(\"Running a ModelInterface validation step that was not overriden: this is a no-op.\")\n return {}", "def run_validation(self):\n # Build a list of validation .hdf5 file paths:\n files = []\n for fname in os.listdir(self.hdf5_directory):\n fpath = os.path.join(self.hdf5_directory, fname)\n if os.path.isfile(fpath) and fname.startswith('validation.') and fname.endswith('.hdf5'):\n files.append(fpath)\n f_start_id = 0\n files.sort()\n num_files = len(files)\n\n # Select first .hdf5 file\n if \\\n torch.distributed.is_initialized() \\\n and torch.distributed.get_world_size() > num_files:\n\n remainder = torch.distributed.get_world_size() % num_files\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_start_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n\n # Set previous_file variable for next iteration\n previous_file = hdf5_fpath\n\n # Load the pre-training data from the .hdf5 file\n pretraining_data = PretrainingDataset(\n hdf5_fpath=hdf5_fpath,\n max_masked_tokens_per_input=self.max_masked_tokens_per_input\n )\n validation_sampler = RandomSampler(pretraining_data) # This could be SequentialSampler\n validation_dataloader = DataLoader(\n pretraining_data,\n sampler=validation_sampler,\n batch_size=self.batch_size * self.n_gpu,\n num_workers=4, pin_memory=True\n )\n\n steps = 0\n average_loss = 0.0 # averaged loss every self.log_freq steps\n\n # Use model in `evaluation mode`\n with torch.no_grad():\n self.model.eval()\n if self.is_main_process:\n 
logging.info(\"*************************\")\n logging.info(\"** Evaluation step **\")\n logging.info(\"*************************\")\n\n # Loop over the rest of pre-training data files\n pool = ProcessPoolExecutor(1)\n if len(files) == 1:\n f_start_id = -1\n for f_id in range(f_start_id + 1, 1 + len(files)//torch.distributed.get_world_size()):\n\n # Submit creation of next DataLoader\n if torch.distributed.get_world_size() > num_files:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n if self.is_main_process:\n logging.info(\n \"Local rank: %s | File n° %s: %s\",\n self.local_rank, f_id, os.path.basename(previous_file)\n )\n previous_file = hdf5_fpath\n dataset_future = pool.submit(\n create_pretraining_dataloader,\n hdf5_fpath,\n self.max_masked_tokens_per_input,\n self.batch_size * self.n_gpu,\n )\n\n # Iterate over batches (w/ progress bar for main process)\n validation_batches = tqdm(\n validation_dataloader,\n desc=\"Computing loss on the validation set...\"\n ) if self.is_main_process else validation_dataloader\n for batch in validation_batches:\n steps += 1\n (\n input_ids,\n segment_ids,\n input_mask,\n masked_lm_labels,\n next_sentence_labels\n ) = [tensor.to(self.device) for tensor in batch]\n\n # Forward Pass\n model_output = self.model(\n input_ids=input_ids,\n token_type_ids=segment_ids,\n attention_mask=input_mask,\n labels=masked_lm_labels,\n next_sentence_label=next_sentence_labels)\n loss = model_output['loss']\n if self.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n\n divisor = self.num_accumulation_steps\n if self.num_accumulation_steps > 1:\n if not self.allreduce_post_accumulation:\n # this division was merged into predivision\n loss = loss / self.num_accumulation_steps\n divisor = 1.0\n\n # Update average\n average_loss += loss.item()\n \n # Move to next file after using up all batches of current file\n del validation_dataloader\n validation_dataloader, hdf5_fpath = \\\n dataset_future.result(timeout=None)\n\n del validation_dataloader\n\n num_steps = max(1, int(steps / self.num_accumulation_steps))\n average_loss = torch.tensor(average_loss, dtype=torch.float32).cuda()\n average_loss = average_loss / (num_steps * divisor)\n if torch.distributed.is_initialized():\n average_loss /= torch.distributed.get_world_size()\n torch.distributed.all_reduce(average_loss)\n\n # Check if model has improved\n validation_loss = average_loss.item()\n model_has_improved = False\n if validation_loss < self.best_validation_loss:\n model_has_improved = True\n self.best_validation_loss = validation_loss\n\n # Log\n if self.is_main_process:\n logging.info(\n \"\\nTotal Validation Steps: %s | Validation Loss = %.3f\",\n num_steps, validation_loss\n )\n self.tensorboard_writer.add_scalar(\n \"Avg. 
validation loss\", validation_loss,\n global_step=self.global_step\n )\n\n # NOTE: /!\\ Put model back in `training mode`\n self.model.train()\n\n return model_has_improved", "def check(self) -> None:\n # validate training config\n super().check()", "def fit(self):\n # Iterate and train.\n step_file = self.checkpointer.get_step_file()\n start_step = Pickle.load(open(step_file, 'rb'))\n for step in xrange(start_step, self.train_size // self.train_batch_size):\n print 'Step No.:', step\n # Checkpoint tensorflow variables for recovery\n if step % self.checkpointer.get_checkpoint_steps() == 0:\n print 'Checkpointing: Saving Tensorflow variables'\n self.saver.save(self.sess, self.checkpointer.get_save_address())\n Pickle.dump(step + 1, open(step_file, 'wb'))\n print 'Checkpointing Complete. Deleting historical checkpoints....'\n self.checkpointer.delete_previous_checkpoints(num_previous=2)\n print 'Deleted.. Moving forward...'\n\n offset = (step * self.train_batch_size) % self.train_size\n batch_data_fwd = self.X_trn_fwd[offset:(offset + self.train_batch_size), :].T\n batch_data_bwd = self.X_trn_bwd[offset:(offset + self.train_batch_size), :].T\n batch_labels = self.Y_trn[offset:(offset + self.train_batch_size), :].T\n\n loss_t_forward, loss_t_backward = self._train_batch(batch_data_fwd, batch_data_bwd, batch_labels)\n print \"Present Loss Forward:\", loss_t_forward\n print \"Present Loss Backward:\", loss_t_backward\n\n # check results on 2 tasks - Visual Validation\n print 'Train Data Validation\\n'\n self._visual_validate(self.X_trn_fwd[301, :], self.X_trn_bwd[301, :], self.Y_trn[301, :])\n print\n print\n print 'Test Data Validation\\n'\n self._visual_validate(self.X_tst_fwd[56, :], self.X_tst_bwd[56, :], self.Y_tst[56, :])\n print\n print\n\n # Store prediction after certain number of steps #############\n # This will be useful for the graph construction\n '''\n if(step % self.checkpointer.get_prediction_checkpoint_steps() == 0):\n self.predict()\n self.store_test_predictions('_' + str(step))\n '''", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n print('*'*100)\n \n # Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def _do_training_cross_validation(self) -> None:\n\n cfg = self.cfg_\n fit_kwargs = {'classes': list(self.data_.classes)}\n\n # Store all of the samples used during cross-validation\n self.y_training_set_all_ = 
list(self._generate_samples(self.train_ids_, 'y'))\n\n # Initialize learner objects with the optimal set of parameters\n # learned from the grid search round (one for each\n # sub-experiment of the cross-validation round)\n for learner, learner_name in zip(self.learners_, self.learner_names_):\n self.cv_learners_[learner_name] = \\\n [learner(**self.learner_gs_cv_params_[learner_name])\n for i in range(len(self.data_.training_set))]\n\n # Make a list of empty lists corresponding to each estimator\n # instance for each learner, which will be used to store the\n # performance metrics for each cross-validation\n # leave-one-fold-out sub-experiment\n self.cv_learner_stats_ = [[] for _ in cfg.learners]\n\n # Fit the `SelectPercentile` feature selector (if applicable)\n if cfg.feature_selection_percentile != 1.0:\n loginfo('Removing {0}% of the features during training round...'\n .format(100 - 100*cfg.feature_selection_percentile))\n feature_selector = \\\n (SelectPercentile(chi2,\n percentile=100*cfg.feature_selection_percentile)\n .fit(self._vectorize_and_sparsify_data(self.training_vec_,\n self.train_ids_),\n self.y_training_set_all_))\n\n # For each fold of the training set, train on all of the other\n # folds and evaluate on the one left out fold\n for i, held_out_fold in enumerate(self.data_.training_set):\n\n loginfo('Cross-validation sub-experiment #{0} in progress'\n .format(i + 1))\n\n # Use each training fold (except for the held-out set) to\n # incrementally build up the model\n training_folds = (self.data_.training_set[:i]\n + self.data_.training_set[i + 1:])\n y_train_all = []\n for j, training_fold in enumerate(training_folds):\n\n # Get the training data\n y_train = list(self._generate_samples(training_fold, 'y'))\n y_train_all.extend(y_train)\n X_train = self._vectorize_and_sparsify_data(self.training_vec_,\n training_fold)\n if cfg.feature_selection_percentile != 1.0:\n X_train = feature_selector.transform(X_train)\n\n # Iterate over the learners\n for learner_name in self.learner_names_:\n\n # Partially fit each estimator with the new training\n # data (specifying the `classes` keyword argument if\n # this is the first go-round and it's a learner that\n # requires this to be specified initially)\n (self.cv_learners_[learner_name][i]\n .partial_fit(X_train,\n y_train,\n **fit_kwargs if not j and learner_name\n in self.requires_classes_kwarg_\n else {}))\n\n # Get mean and standard deviation for actual values\n y_train_all = np.array(y_train_all)\n y_train_mean = y_train_all.mean()\n y_train_std = y_train_all.std()\n\n # Get test data\n y_test = list(self._generate_samples(held_out_fold, 'y'))\n X_test = self._vectorize_and_sparsify_data(self.training_vec_,\n held_out_fold)\n if cfg.feature_selection_percentile != 1.0:\n X_test = feature_selector.transform(X_test)\n\n # Make predictions with the modified estimators\n for j, learner_name in enumerate(self.learner_names_):\n\n # Make predictions with the given estimator,rounding the\n # predictions\n y_test_preds = \\\n np.round(self.cv_learners_[learner_name][i].predict(X_test))\n\n # Rescale the predicted values based on the\n # mean/standard deviation of the actual values and\n # fit the predicted values within the original scale\n # (i.e., no predicted values should be outside the range\n # of possible values)\n y_test_preds_dict = \\\n ex.rescale_preds_and_fit_in_scale(y_test_preds,\n self.data_.classes,\n y_train_mean,\n y_train_std)\n\n if cfg.rescale:\n y_test_preds = y_test_preds_dict['rescaled']\n else:\n y_test_preds = 
y_test_preds_dict['fitted_only']\n\n # Evaluate the predictions and add to list of evaluation\n # reports for each learner\n (self.cv_learner_stats_[j]\n .append(ex.evaluate_predictions_from_learning_round(\n y_test=y_test,\n y_test_preds=y_test_preds,\n classes=self.data_.classes,\n prediction_label=cfg.prediction_label,\n non_nlp_features=cfg.non_nlp_features,\n nlp_features=cfg.nlp_features,\n learner=self.cv_learners_[learner_name][i],\n learner_name=learner_name,\n games=cfg.games,\n test_games=cfg.games,\n _round=i + 1,\n iteration_rounds=self.data_.folds,\n n_train_samples=len(y_train_all),\n n_test_samples=len(held_out_fold),\n rescaled=cfg.rescale,\n transformation_string=self.transformation_string_,\n bin_ranges=cfg.bin_ranges)))", "def validate(self, current_index): \n loss_out = []\n gts_cat = torch.LongTensor()\n pred_cat = torch.LongTensor()\n Validation = self.datasetManager.get_validation_dataloader()\n length = len(Validation)\n print('\\nValidation : %i steps'%length)\n for i, batch in tqdm.tqdm(enumerate(Validation)):\n batch = self.to_device(batch)\n img = batch[0]\n gts = batch[1]\n out = self.network(img)\n out = self.softmax(out)\n loss = self.loss(out,gts)\n pred = torch.argmax(out, 1, keepdim = True)\n pred = pred.view(-1)\n loss_out.append(loss.item())\n \n gts_cat = torch.cat((gts_cat,gts.cpu()),0)\n pred_cat = torch.cat((pred_cat,pred.cpu()),0)\n\n f1_score = sklearn.metrics.f1_score(gts_cat,pred_cat, average = 'macro')\n Kappa = sklearn.metrics.cohen_kappa_score(gts_cat,pred_cat)\n Accuracy = sklearn.metrics.accuracy_score(gts_cat,pred_cat) \n \n self.tb_writer.add_scalar(\"f1 score\",f1_score,current_index)\n self.tb_writer.add_scalar('Kappa score',Kappa,current_index)\n self.tb_writer.add_scalar('Accuracy', Accuracy, current_index)\n self.tb_writer.add_scalar('Validation Loss', np.mean(loss_out), current_index)\n \n return np.mean(loss_out)", "def _evaluate_during_fit(self, test_loader, epoch):", "def compute_epoch(self, dataset, validation=False):\n\n if validation:\n self.model.eval()\n else:\n # self.model.train()\n # deal with pretrained models.\n if self.opt.freeze_encoder:\n self.model.encoder.eval()\n else:\n self.model.encoder.train()\n\n if self.opt.freeze_decoder:\n self.model.decoder.eval()\n self.model.generator.eval()\n else:\n self.model.decoder.train()\n self.model.generator.train()\n\n total_loss, n_word_total, n_word_correct = 0, 0, 0\n \n\n label = \"Training\" if not validation else \"Validation\"\n for batch in tqdm(dataset, desc=' - '+label, leave=False, dynamic_ncols=True):\n \n # prepare data\n src_seq, src_pos, tgt_seq, tgt_pos = map(\n lambda x: x.to(self.device), batch)\n\n \n gold = tgt_seq[:, 1:]\n if not validation:\n self.optimiser.zero_grad()\n # compute forward propagation\n pred = self.model(src_seq, src_pos, tgt_seq, tgt_pos)\n \n # compute performance\n loss, n_correct = self.performance(\n pred.view(-1, pred.size(2)), \n gold, \n smoothing=self.opt.label_smoothing)\n\n if not validation:\n # backwards propagation\n loss.backward()\n # update parameters\n self.optimiser.step_and_update_lr()\n else:\n if self.opt.log:\n # generate outputs\n self.save_eval_outputs(pred)\n\n # bartending outputs.\n total_loss += loss.detach().item()\n n_word_total += gold.ne(self.constants.PAD).sum().detach().item()\n n_word_correct += n_correct\n\n loss_per_word = total_loss/n_word_total\n accuracy = n_word_correct/n_word_total\n\n return loss_per_word, accuracy", "def train(self, training_steps=10):", "def validate(args, net, val_data_loader, 
val_dataset, iteration_num, iou_thresh=0.5):\r\n print('Validating at ', iteration_num)\r\n num_images = len(val_dataset)\r\n num_classes = args.num_classes\r\n\r\n det_boxes = [[] for _ in range(len(CLASSES))]\r\n gt_boxes = []\r\n print_time = True\r\n batch_iterator = None\r\n val_step = 100\r\n count = 0\r\n torch.cuda.synchronize()\r\n ts = time.perf_counter()\r\n\r\n for val_itr in range(len(val_data_loader)):\r\n if not batch_iterator:\r\n batch_iterator = iter(val_data_loader)\r\n\r\n torch.cuda.synchronize()\r\n t1 = time.perf_counter()\r\n\r\n images, targets, img_indexs = next(batch_iterator)\r\n batch_size = images.size(0)\r\n height, width = images.size(2), images.size(3)\r\n\r\n if args.cuda:\r\n images = Variable(images.cuda(), volatile=True)\r\n output = net(images)\r\n\r\n loc_data = output[0]\r\n conf_preds = output[1]\r\n prior_data = output[2]\r\n\r\n if print_time and val_itr%val_step == 0:\r\n torch.cuda.synchronize()\r\n tf = time.perf_counter()\r\n print('Forward Time {:0.3f}'.format(tf-t1))\r\n for b in range(batch_size):\r\n gt = targets[b].numpy()\r\n gt[:,0] *= width\r\n gt[:,2] *= width\r\n gt[:,1] *= height\r\n gt[:,3] *= height\r\n gt_boxes.append(gt)\r\n decoded_boxes = decode(loc_data[b].data, prior_data.data, args.cfg['variance']).clone()\r\n conf_scores = net.softmax(conf_preds[b]).data.clone()\r\n\r\n for cl_ind in range(1, num_classes):\r\n scores = conf_scores[:, cl_ind].squeeze()\r\n c_mask = scores.gt(args.conf_thresh) # greater than minmum threshold\r\n scores = scores[c_mask].squeeze()\r\n # print('scores size',scores.size())\r\n if scores.dim() == 0:\r\n # print(len(''), ' dim ==0 ')\r\n det_boxes[cl_ind - 1].append(np.asarray([]))\r\n continue\r\n boxes = decoded_boxes.clone()\r\n l_mask = c_mask.unsqueeze(1).expand_as(boxes)\r\n boxes = boxes[l_mask].view(-1, 4)\r\n # idx of highest scoring and non-overlapping boxes per class\r\n ids, counts = nms(boxes, scores, args.nms_thresh, args.topk) # idsn - ids after nms\r\n scores = scores[ids[:counts]].cpu().numpy()\r\n boxes = boxes[ids[:counts]].cpu().numpy()\r\n # print('boxes sahpe',boxes.shape)\r\n boxes[:,0] *= width\r\n boxes[:,2] *= width\r\n boxes[:,1] *= height\r\n boxes[:,3] *= height \r\n\r\n for ik in range(boxes.shape[0]):\r\n boxes[ik, 0] = max(0, boxes[ik, 0])\r\n boxes[ik, 2] = min(width, boxes[ik, 2])\r\n boxes[ik, 1] = max(0, boxes[ik, 1])\r\n boxes[ik, 3] = min(height, boxes[ik, 3])\r\n\r\n cls_dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=True)\r\n\r\n det_boxes[cl_ind-1].append(cls_dets)\r\n count += 1\r\n if val_itr%val_step == 0:\r\n torch.cuda.synchronize()\r\n te = time.perf_counter()\r\n print('im_detect: {:d}/{:d} time taken {:0.3f}'.format(count, num_images, te-ts))\r\n torch.cuda.synchronize()\r\n ts = time.perf_counter()\r\n if print_time and val_itr%val_step == 0:\r\n torch.cuda.synchronize()\r\n te = time.perf_counter()\r\n print('NMS stuff Time {:0.3f}'.format(te - tf))\r\n print('Evaluating detections for itration number ', iteration_num)\r\n return evaluate_detections(gt_boxes, det_boxes, CLASSES, iou_thresh=iou_thresh)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def test_validation(self):\n self.validationFails()", "def validation(model, val_loader, device, writer, iterator, log_path, includeHeading):\r\n model.eval()\r\n total_loss = 0.0\r\n \r\n numBatches = len(val_loader)\r\n final = False\r\n start = True\r\n \r\n # for each batch\r\n with 
torch.no_grad():\r\n for i, data in enumerate(val_loader, 0):\r\n \r\n data = data.to(device)\r\n \r\n # If first or last batch \r\n if i == 1:\r\n start = False\r\n \r\n if i+1 == numBatches:\r\n final = True\r\n \r\n # Apply model to data\r\n prediction = model(data, final, start)\r\n \r\n # Calculate loss of the batch\r\n prediction = torch.clamp(prediction, 0, 255, out=None)\r\n loss = torch.nn.functional.mse_loss(prediction, data.y)\r\n total_loss += loss.item()\r\n \r\n # Dump images from first batch\r\n if i == 0:\r\n \r\n # Retransform to images\r\n y_predict = retransformToImage(prediction.cpu().detach().numpy(), includeHeading)\r\n y_true = retransformToImage(data.y.cpu().detach().numpy(), includeHeading)\r\n \r\n # Dump images\r\n writer.write_image(y_predict, iterator, if_predict=True, includeHeading = includeHeading)\r\n writer.write_image(y_true, iterator, if_predict=False, includeHeading = includeHeading)\r\n \r\n # Print and dump Total validation loss \r\n valLoss = total_loss / len(val_loader)\r\n print(\"Validation loss = {:.2f}\".format(valLoss))\r\n # write the validation loss to tensorboard\r\n writer.write_loss_validation(valLoss, iterator)\r\n \r\n pickle.dump(valLoss, open(os.path.join(log_path,'valLoss.data'), 'wb'))\r\n \r\n return valLoss", "def train_step(self):\n pass", "def validation_step(self, batch, batch_idx):\n\n x, y, s = batch\n\n pred = self.forward(x, y)\n\n targets = self.idx_mapping(s).float()\n\n loss = self.loss_fct(pred, targets)\n\n accuracy = torch.true_divide(torch.sum(torch.round(torch.sigmoid(pred)) == targets), targets.shape[0])\n\n self.log('validation/loss', loss)\n self.log('validation/accuracy', accuracy)\n\n\n return loss", "def train(self, inputs, targets, validation_data, num_epochs, regularizer_type=None):\n for k in xrange(num_epochs):\n loss = 0\n # Forward pass\n a1, probs = self._feed_forward(inputs)\n \n # Backpropagation\n dWxh, dWhy, dbh, dby = self._back_propagation(inputs, targets, a1, probs,len(inputs))\n\n # Perform the parameter update with gradient descent\n self.Wxh += -self.learning_rate * dWxh\n self.bh += -self.learning_rate * dbh\n self.Why += -self.learning_rate * dWhy\n self.by += -self.learning_rate * dby \n \n\n # validation using the validation data\n\n validation_inputs = validation_data[0]\n validation_targets = validation_data[1]\n\n print 'Validation'\n\n # Forward pass\n a1, probs = self._feed_forward(validation_inputs)\n\n # Backpropagation\n dWxh, dWhy, dbh, dby = self._back_propagation(validation_inputs, validation_targets, a1, probs,len(validation_inputs))\n\n if regularizer_type == 'L2':\n dWhy = self.reg_lambda * self.Why\n dWxh = self.reg_lambda * self.Wxh\n\n # Perform the parameter update with gradient descent\n self.Wxh += -self.learning_rate * dWxh\n self.bh += -self.learning_rate * dbh\n self.Why += -self.learning_rate * dWhy\n self.by += -self.learning_rate * dby \n\n if k%1 == 0:\n print \"Epoch \" + str(k) + \" : Loss = \" + str(self._calc_smooth_loss(loss, len(inputs), regularizer_type))\n\n #self.save('models.pkl')", "def validate(val_loader, model, criterion, epoch, opt):\n # switch to evaluate mode\n model.eval()\n\n top1 = utils.AverageMeter()\n\n for i, (input_points, _labels, segs) in enumerate(val_loader):\n # bz x 2048 x 3 \n input_points = Variable(input_points, volatile=True)\n input_points = input_points.transpose(2, 1)\n _labels = _labels.long() # this will be feed to the network \n segs = segs.long()\n labels_onehot = utils.labels_batch2one_hot_batch(_labels, 
opt.num_classes)\n segs = Variable(segs, volatile=True) \n labels_onehot = Variable(labels_onehot, volatile=True)\n\n if opt.cuda:\n input_points = input_points.cuda() \n segs = segs.cuda() # must be long cuda tensor \n labels_onehot = labels_onehot.float().cuda() # this will be feed into the network\n \n # forward, backward optimize \n pred, _, _ = model(input_points, labels_onehot)\n pred = pred.view(-1, opt.num_seg_classes)\n segs = segs.view(-1, 1)[:, 0] # min is already 0\n # debug_here() \n loss = criterion(pred, segs) \n\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(segs.data).cpu().sum()\n\n acc = correct/float(opt.batch_size * opt.num_points)\n top1.update(acc, input_points.size(0))\n\n if i % opt.print_freq == 0:\n print('[%d: %d] val loss: %f accuracy: %f' %(i, len(val_loader), loss.data[0], acc))\n # print(tested_samples)\n return top1.avg", "def validate_step(self, generator=None, max_batch_num=None, valid_type='train', epoch_it=0):\n # clearing cuda, because of memory leak\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n if valid_type == 'train':\n train_losses = self.train_losses.copy()\n self.init_train_losses()\n return train_losses\n\n elif valid_type == 'valid':\n pred_sed_list, pred_doa_list = [], []\n gt_sed_list, gt_doa_list = [], []\n loss_all, loss_sed, loss_doa, loss_orthogonal , loss_doa_smoothness = 0., 0., 0., 0., 0.\n\n for batch_idx, batch_sample in enumerate(generator):\n if batch_idx == max_batch_num:\n break\n\n batch_x = batch_sample['waveform']\n data_type = batch_sample['data_type']\n batch_target = {\n 'sed': batch_sample['sed_label'],\n 'doa': batch_sample['doa_label']\n }\n\n if self.cuda:\n batch_x = batch_x.cuda(non_blocking=True)\n batch_target['sed'] = batch_target['sed'].cuda(non_blocking=True)\n batch_target['doa'] = batch_target['doa'].cuda(non_blocking=True)\n\n with torch.no_grad():\n self.af_extractor.eval()\n self.model.eval()\n (batch_x, batch_target) = self.af_extractor((batch_x, batch_target,valid_type, data_type ))\n batch_x = (batch_x - self.mean) / self.std\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n pred, pred_constraint = self.model(batch_x)\n if self.cfg['training']['model'] == 'EINV2':\n pred = self.model(batch_x)\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n loss_dict = self.losses.calculate_attention(pred, pred_constraint,batch_target, epoch_it, self.model)\n if self.cfg['training']['model'] == 'EINV2':\n loss_dict = self.losses.calculate(pred,batch_target, epoch_it, self.model)\n pred['sed'] = torch.sigmoid(pred['sed'])\n loss_all += loss_dict['all'].cpu().detach().numpy()\n loss_sed += loss_dict['sed'].cpu().detach().numpy()\n loss_doa += loss_dict['doa'].cpu().detach().numpy()\n\n if self.cfg['training']['weight_constraints']:\n loss_orthogonal += loss_dict['loss_weight_orthogonal'].cpu().detach().numpy()\n\n if self.cfg['training']['layer_constraints']:\n loss_orthogonal += loss_dict['loss_layer_orthogonal'].cpu().detach().numpy()\n\n if self.cfg['training']['weight_constraints_1']:\n loss_orthogonal += loss_dict['loss_weight_orthogonal_1'].cpu().detach().numpy()\n\n if self.cfg['training']['layer_constraints_1']:\n loss_orthogonal += loss_dict['loss_layer_orthogonal_1'].cpu().detach().numpy()\n\n if self.cfg['training']['smoothness_loss']:\n loss_doa_smoothness += loss_dict['loss_doa_smoothness'].cpu().detach().numpy()\n\n 
pred_sed_list.append(pred['sed'].cpu().detach().numpy())\n pred_doa_list.append(pred['doa'].cpu().detach().numpy())\n\n pred_sed = np.concatenate(pred_sed_list, axis=0)\n pred_doa = np.concatenate(pred_doa_list, axis=0)\n\n origin_num_clips = int(pred_sed.shape[0]/self.num_segments)\n origin_T = int(pred_sed.shape[1]*self.num_segments)\n pred_sed = pred_sed.reshape((origin_num_clips, origin_T, 2, -1))[:, :int(self.clip_length / self.label_resolution)]\n pred_doa = pred_doa.reshape((origin_num_clips, origin_T, 2, -1))[:, :int(self.clip_length / self.label_resolution)]\n\n pred_sed_max = pred_sed.max(axis=-1)\n pred_sed_max_idx = pred_sed.argmax(axis=-1)\n pred_sed = np.zeros_like(pred_sed)\n for b_idx in range(origin_num_clips):\n for t_idx in range(origin_T):\n for track_idx in range(2):\n pred_sed[b_idx, t_idx, track_idx, pred_sed_max_idx[b_idx, t_idx, track_idx]] = \\\n pred_sed_max[b_idx, t_idx, track_idx]\n pred_sed = (pred_sed > self.cfg['training']['threshold_sed']).astype(np.float32)\n \n # convert Catesian to Spherical\n azi = np.arctan2(pred_doa[..., 1], pred_doa[..., 0])\n elev = np.arctan2(pred_doa[..., 2], np.sqrt(pred_doa[..., 0]**2 + pred_doa[..., 1]**2))\n pred_doa = np.stack((azi, elev), axis=-1) # (N, T, tracks, (azi, elev))\n\n # convert format\n pred_sed_metrics2019, pred_doa_metrics2019 = to_metrics2019_format(pred_sed, pred_doa)\n gt_sed_metrics2019, gt_doa_metrics2019 = self.valid_gt_sed_metrics2019, self.valid_gt_doa_metrics2019\n pred_dcase_format_dict = to_dcase_format(pred_sed, pred_doa)\n pred_metrics2020_dict = to_metrics2020_format(pred_dcase_format_dict, \n pred_sed.shape[0]*pred_sed.shape[1], label_resolution=self.label_resolution)\n gt_metrics2020_dict = self.gt_metrics2020_dict\n\n\n if self.cfg['training']['weight_constraints']:\n out_losses = {\n 'loss_all': loss_all / (batch_idx + 1),\n 'loss_sed': loss_sed / (batch_idx + 1),\n 'loss_doa': loss_doa / (batch_idx + 1),\n 'loss_orthogonal': loss_orthogonal / (batch_idx + 1),\n }\n elif self.cfg['training']['layer_constraints']:\n out_losses = {\n 'loss_all': loss_all / (batch_idx + 1),\n 'loss_sed': loss_sed / (batch_idx + 1),\n 'loss_doa': loss_doa / (batch_idx + 1),\n 'loss_layer_orthogonal': loss_orthogonal / (batch_idx + 1),\n }\n elif self.cfg['training']['weight_constraints_1']:\n out_losses = {\n 'loss_all': loss_all / (batch_idx + 1),\n 'loss_sed': loss_sed / (batch_idx + 1),\n 'loss_doa': loss_doa / (batch_idx + 1),\n 'loss_orthogonal_1': loss_orthogonal / (batch_idx + 1),\n }\n elif self.cfg['training']['layer_constraints_1']:\n out_losses = {\n 'loss_all': loss_all / (batch_idx + 1),\n 'loss_sed': loss_sed / (batch_idx + 1),\n 'loss_doa': loss_doa / (batch_idx + 1),\n 'loss_layer_orthogonal_1': loss_orthogonal / (batch_idx + 1),\n }\n elif self.cfg['training']['smoothness_loss']:\n out_losses = {\n 'loss_all': loss_all / (batch_idx + 1),\n 'loss_sed': loss_sed / (batch_idx + 1),\n 'loss_doa': loss_doa / (batch_idx + 1),\n 'loss_doa_smoothness': loss_doa_smoothness / (batch_idx + 1),\n }\n\n else:\n out_losses = {\n 'loss_all': loss_all / (batch_idx + 1),\n 'loss_sed': loss_sed / (batch_idx + 1),\n 'loss_doa': loss_doa / (batch_idx + 1),\n }\n\n\n pred_dict = {\n 'dcase2019_sed': pred_sed_metrics2019,\n 'dcase2019_doa': pred_doa_metrics2019,\n 'dcase2020': pred_metrics2020_dict,\n }\n\n gt_dict = {\n 'dcase2019_sed': gt_sed_metrics2019,\n 'dcase2019_doa': gt_doa_metrics2019,\n 'dcase2020': gt_metrics2020_dict,\n }\n metrics_scores = self.metrics.calculate(pred_dict, gt_dict)\n return out_losses, 
metrics_scores", "def train(self):\n not_improved_count = 0\n best_validation_fscore = 0.0\n\n for epoch in range(self.start_epoch, self.max_epochs + 1):\n # Perform one training epoch and output training metrics\n training_metrics = self.run_epoch(epoch, self.train_data_loader, training=True)\n self.logger.info(\"Training epoch {} finished.\".format(epoch))\n self.log_metrics(training_metrics)\n\n # Perform one validation epoch and output validation metrics\n validation_metrics = self.run_epoch(epoch, self.valid_data_loader, training=False)\n self.logger.info(\"Validation epoch {} finished.\".format(epoch))\n self.log_metrics(validation_metrics)\n\n # Check if model is new best according to validation F1 score\n improved = validation_metrics[\"fscore\"] > best_validation_fscore\n if improved:\n best_validation_fscore = validation_metrics[\"fscore\"]\n not_improved_count = 0\n else:\n not_improved_count += 1\n\n if improved or epoch % self.save_period == 0:\n self._save_checkpoint(epoch, is_best=improved)\n\n if not_improved_count > self.early_stop and epoch >= self.min_epochs:\n self.logger.info(\"Validation performance didn\\'t improve for {} epochs. \"\n \"Training stops.\".format(self.early_stop))\n break", "def train(self):\n\t\traise NotImplementedError", "def train(self):\n raise NotImplementedError", "def train_model(self):\n ### Early Stop Mechanism\n loss = previous_loss = float(\"inf\")\n patience_left = self.config.patience\n ### Early Stop Mechanism\n\n self.generator = Generator(self.model.config, training_strategy=self.training_strategy)\n self.evaluator = Evaluator(model=self.model, data_type=self.teston, debug=self.debug)\n\n if self.config.loadFromData:\n self.load_model()\n \n for cur_epoch_idx in range(self.config.epochs):\n print(\"Epoch[%d/%d]\"%(cur_epoch_idx,self.config.epochs))\n loss = self.train_model_epoch(cur_epoch_idx)\n self.test(cur_epoch_idx)\n\n ### Early Stop Mechanism\n ### start to check if the loss is still decreasing after an interval. \n ### Example, if early_stop_epoch == 50, the trainer will check loss every 50 epoche.\n ### TODO: change to support different metrics.\n if ((cur_epoch_idx + 1) % self.config.early_stop_epoch) == 0: \n if patience_left > 0 and previous_loss <= loss:\n patience_left -= 1\n print('%s more chances before the trainer stops the training. 
(prev_loss, curr_loss): (%.f, %.f)' % \\\n (patience_left, previous_loss, loss))\n\n elif patience_left == 0 and previous_loss <= loss:\n self.evaluator.result_queue.put(Evaluator.TEST_BATCH_EARLY_STOP)\n break\n else:\n patience_left = self.config.patience\n\n previous_loss = loss\n ### Early Stop Mechanism\n\n self.generator.stop()\n self.evaluator.save_training_result(self.training_results)\n self.evaluator.stop()\n\n if self.config.save_model:\n self.save_model()\n\n if self.config.disp_result:\n self.display()\n\n if self.config.disp_summary:\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)\n\n self.export_embeddings()\n\n return loss", "def validationSet(self):\r\n self.currIdx = 0\r\n self.samples = self.validationSamples", "def validate(val_loader, model, epoch, cfg):\n batch_time = meter.TimeMeter(True)\n data_time = meter.TimeMeter(True)\n prec = meter.ClassErrorMeter(topk=[1], accuracy=True)\n\n # testing mode\n model.eval()\n\n for i, (shapes, labels) in enumerate(val_loader):\n batch_time.reset()\n # bz x 12 x 3 x 224 x 224\n labels = labels.long().view(-1)\n shapes = Variable(shapes)\n labels = Variable(labels)\n\n # shift data to GPU\n if cfg.cuda:\n shapes = shapes.cuda()\n labels = labels.cuda()\n\n # forward, backward optimize\n preds = model(shapes)\n\n if cfg.have_aux:\n preds, aux = preds\n\n prec.add(preds.data, labels.data)\n\n if i % cfg.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time:.3f}\\t'\n 'Epoch Time {data_time:.3f}\\t'\n 'Prec@1 {top1:.3f}\\t'.format(\n epoch, i, len(val_loader), batch_time=batch_time.value(),\n data_time=data_time.value(), top1=prec.value(1)))\n\n print('mean class accuracy at epoch {0}: {1} '.format(epoch, prec.value(1)))\n\n return prec.value(1)", "def test_validation():\n # specify the parameters for the validation\n params = {}\n\n params['model_params'] = {'func': model.mnist_tfutils}\n\n params['load_params'] = {'host': testhost,\n 'port': testport,\n 'dbname': testdbname,\n 'collname': testcol,\n 'exp_id': 'training0'}\n\n params['save_params'] = {'exp_id': 'validation0'}\n\n params['validation_params'] = {'valid0': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 10,\n 'agg_func': utils.mean_dict}}\n params['skip_check'] = True\n\n # check that the results are correct\n conn = pm.MongoClient(host=testhost,\n port=testport)\n\n conn[testdbname][testcol + '.files'].delete_many({'exp_id': 'validation0'})\n\n # actually run the model\n base.test_from_params(**params)\n\n # ... specifically, there is now a record containing the validation0 performance results\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'validation0'}).count() == 1\n # ... here's how to load the record:\n r = conn[testdbname][testcol + '.files'].find({'exp_id': 'validation0'})[0]\n asserts_for_record(r, params, train=False)\n\n # ... 
check that the recorrectly ties to the id information for the\n # pre-trained model it was supposed to validate\n assert r['validates']\n idval = conn[testdbname][testcol + '.files'].find({'exp_id': 'training0'})[50]['_id']\n v = conn[testdbname][testcol + '.files'].find({'exp_id': 'validation0'})[0]['validates']\n assert idval == v", "def train_model(model, train, validation):\n # Add your code here\n\n monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, restore_best_weights=True)\n\n model.fit_generator(\n generator=train,\n validation_data=validation,\n epochs=1000,\n callbacks=monitor\n\n )\n # Preprocessing (Enrichment)\n # Preprocessing (Normalisation)\n\n return model", "def train_model(self):\n early_stopping = EarlyStopping(self, self.hyper.early_stopping_enabled, self.hyper.early_stopping_limit)\n loss_history_train = []\n loss_metric_train = tf.keras.metrics.Mean()\n\n x_train, next_values_train = self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train,\n self.dataset.next_values_train])\n\n x_train_val, next_values_train_val = self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train_val,\n self.dataset.next_values_train_val])\n\n for epoch in range(self.hyper.epochs):\n print(\"Epoch %d\" % (epoch,))\n\n for step, (x_batch_train, next_values_batch_train) in enumerate(zip(x_train, next_values_train)):\n self.train_step(x_batch_train, next_values_batch_train, loss_metric_train)\n\n if step % 50 == 0:\n print(\"\\tStep %d: mean loss = %.4f\" % (step, loss_metric_train.result()))\n\n loss_train_batch = loss_metric_train.result()\n loss_history_train.append(loss_train_batch)\n loss_metric_train.reset_states()\n\n self.model.save_weights(self.checkpoint_path.format(epoch=epoch))\n\n # Check early stopping criterion --> Has the loss on the validation set not decreased?\n best_epoch = early_stopping.execute(epoch, x_train_val, next_values_train_val)\n self.clean_up(early_stopping, epoch)\n\n if best_epoch > 0:\n print('Model from epoch %d was selected by early stopping.' 
% best_epoch)\n print('Training process will be stopped now.')\n\n self.save_model(best_epoch)\n\n return\n\n self.save_model(epoch=self.hyper.epochs - 1)", "def _valid_epoch(self, epoch):\n self.model.eval()\n self.valid_metrics.reset()\n with torch.no_grad():\n for batch_idx, (data, target, _) in enumerate(self.valid_data_loader):\n data, target = data.to(device=self.device, dtype=torch.float), target.to(self.device, dtype=torch.float)\n\n output = self.model(data)\n\n if self.only_scored_classes:\n # Only consider classes that are scored with the Challenge metric.\n if self.config[\"loss\"][\"type\"] == \"weighted_bce_with_logits_loss\":\n loss = self.criterion(output[:, self.indices], target[:, self.indices], self.weights)\n else:\n loss = self.criterion(output[:, self.indices], target[:, self.indices])\n else:\n loss = self.criterion(output, target)\n\n self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')\n self.valid_metrics.update('loss', loss.item())\n\n output_logit = self.sigmoid(output)\n for met in self.metric_ftns:\n self.valid_metrics.update(met.__name__, met(self._to_np(output_logit), self._to_np(target)))\n # self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))\n\n if self.lr_scheduler is not None and self.config[\"lr_scheduler\"][\"type\"] == \"ReduceLROnPlateau\":\n self.lr_scheduler.step(self.valid_metrics.result()[\"challenge_metric\"])\n\n # add histogram of model parameters to the tensorboard\n # for name, p in self.model.named_parameters():\n # self.writer.add_histogram(name, p, bins='auto')\n return self.valid_metrics.result()", "def validate(self, trainingSet): \n if self.regression:\n return self._validateRegression(trainingSet) \n else:\n return self._validateClassification(trainingSet)", "def cross_validate_model(self, X_train, y_train):\n\n\t\t# Build a stratified k-fold cross-validator object\n\t\tskf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)\n\n\t\t'''\n\t\tEvaluate the score by cross-validation\n\t\tThis fits the classification model on the training data, according to the cross-validator\n\t\tand reports the scores.\n\t\tAlternative: sklearn.model_selection.cross_validate\n\t\t'''\n\t\tscores = cross_val_score(self.classifier, X_train, y_train, scoring='accuracy', cv=skf)\n\n\t\tprint(\"%.2f seconds: Cross-validation finished\" % time.process_time())\n\n\t\t# Log the cross-validation scores, the mean score and the 95% confidence interval, according to:\n\t\t# http://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics\n\t\t# https://en.wikipedia.org/wiki/Standard_error#Assumptions_and_usage\n\t\t# print(\"Scores = %s\" % scores)\n\t\t# print(\"Accuracy: %0.2f (±%0.2f)\" % (scores.mean()*100, scores.std()*2*100))\n\t\t# ↳ https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html", "def train(self, training_data):\n pass", "def _valid_epoch(self, epoch):\n self.model.eval()\n self.valid_metrics.reset()\n with torch.no_grad():\n for batch_idx, (data, target_seg, target_class) in enumerate(self.valid_data_loader):\n data, target_seg, target_class = data.to(self.device), target_seg.to(self.device), target_class.to(self.device)\n\n output_seg, output_class = self.model(data)\n loss = self.criterion((output_seg, output_class), target_seg, target_class, epoch)\n\n self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')\n self.valid_metrics.update('loss', loss.item())\n for met in self.metric_ftns:\n if met.__name__ == 
\"accuracy\":\n self.valid_metrics.update(met.__name__, met(output_class, target_class))\n else:\n self.valid_metrics.update(met.__name__, met(output_seg, target_seg))\n\n data_cpu = data.cpu()\n self._visualize_input(data_cpu)\n self._visualize_prediction(data_cpu, output_seg.cpu(), target_seg.cpu())\n\n # add histogram of model parameters to the tensorboard\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(name, p, bins='auto')\n return self.valid_metrics.result()", "def trainer(model, X_train, y_train, X_valid, y_valid, config):\n # loop for number of epochs\n # shuffle inputs based off seed\n # need to shuffle validation based off same seed\n # forward prop and get xenloss\n # backprop and update weights\n\n stop_count = config['early_stop_epoch']\n b_size = config[\"batch_size\"]\n stop = config['early_stop']\n\n xnloss = []\n val_loss = [float('inf')]\n test_scores = []\n\n train_accu = []\n valid_accu = []\n\n\n #validation loss increase per epoch counter\n c = -1\n \n for i in range(config[\"epochs\"]):\n np.random.seed(i)\n np.random.shuffle(X_train)\n\n np.random.seed(i)\n np.random.shuffle(y_train)\n\n '''You should average the loss across all mini batches'''\n #means sum up loss from all mini-batches and divide by num_batches\n sums = 0\n\n num_batches = int(X_train.shape[0] / b_size)\n k=0\n for j in range(num_batches):\n # choose minibatch\n x = X_train[j * b_size: (j+1) * b_size]\n targets = y_train[j * b_size: (j+1) * b_size]\n loss, y_pred = model.forward_pass(x, targets)\n loss = loss / (config['batch_size'] * 10) # 10 classes\n sums += loss\n #xnloss.append(loss)\n model.backward_pass()\n k +=1\n # if k < 5 or k > 44:\n # print(targets[0, :])\n # print(y_pred[0, :])\n # print(y_pred[0, :].sum())\n # print(k, '=============')\n\n # mini-batch done here, take avg of loss\n avg_loss = sums / num_batches\n xnloss.append(avg_loss)\n \n ''' epochs loop continues here\n 0) perform validation and compute its (val) loss\n\n 1) calculate test accuracy for every epoch where the\n validation loss is better than the previous validation loss.\n \n 2) Save this result (test score OR loss?) and choose the best \n one when you hit the early stopping criteria.\n\n 3) early stopping - stop training (epochs loop) after 5th consecutive \n increase in validation loss. 
(Experiment with diff values).\n '''\n\n '''VALIDATION PERFORMACE'''\n v_loss, v_pred = model.forward_pass(X_valid, y_valid)\n v_loss_norm = v_loss / (len(X_valid) * 10)\n\n\n '''TEST ACCURACY''' \n #if val loss better (less) than prev: calculate test scores\n \n if v_loss_norm > val_loss[-1]:\n print(\"val loss going up from last time at epoch i=\", i)\n c += 1\n else:\n c = 0\n '''insert code for test accu here'''\n # val_loss.append(v_loss_norm)\n # else: #else val loss increased, so increment counter\n \n val_loss.append(v_loss_norm)\n \n '''EARLY STOPPING'''\n if stop and c == stop_count:\n print(\"early stopped at epoch =\", i+1)\n break\n\n print(val_loss[1:3])\n print(val_loss, len(xnloss), len(val_loss[1:]))\n #outside of epochs loop\n plt.plot(xnloss, label='training loss')\n plt.plot(val_loss[1:], label='validation loss')\n plt.title(\"losses across all epochs\")\n plt.xlabel(\"epochs\")\n plt.ylabel(\"avg loss for the epoch\")\n plt.legend()\n plt.savefig('raised_a.png')\n plt.show()\n #firstplot.png is training loss against # of batches, in 1 epoch\n #avgacrossepochs.png is avg training loss of all batches, across 50 epochs\n # both_losses = []\n \n # for i in range(len(xnloss)):\n # both_losses.append((val_loss[i], xnloss[i]))\n # print(\"validation errors: \", [(val_loss[i], xnloss[i]) for i in range(len(xnloss))])", "def train(self, ):\n raise NotImplementedError", "def executeKFoldValidation(train_data, train_labels, val_data, val_labels, test_data, test_labels,\n images_47, labels_47, images_84, labels_84, all_unseen_images, all_unseen_labels):\n if run_k_fold_validation:\n print(\"In executingKFoldValidation\")\n\n # this is doing it manually:\n kfold = StratifiedKFold(n_splits=k_fold_num, shuffle=True)\n\n test_scores_list = []\n unseen_47_scores_list = []\n unseen_84_scores_list = []\n all_unseen_scores_list = []\n\n test_matrix_list = []\n matrix_47_list = []\n matrix_84_list = []\n all_matrix_list = []\n kf_counter = 0\n\n for train, test in kfold.split(train_data, train_labels):\n kf_counter += 1\n print('KFold #:', kf_counter)\n\n model = buildClassifier()\n # fit the model\n model.fit(train_data[train],\n train_labels[train],\n epochs=epochs,\n validation_data=(val_data, val_labels),\n batch_size=batch_size\n )\n\n unseen_47_scores = model.evaluate(images_47, labels_47, batch_size=batch_size)\n unseen_47_scores_list.append(unseen_47_scores[1] * 100)\n unseen_84_scores = model.evaluate(images_84, labels_84, batch_size=batch_size)\n unseen_84_scores_list.append(unseen_84_scores[1] * 100)\n test_scores = model.evaluate(test_data, test_labels, batch_size=batch_size)\n test_scores_list.append(test_scores[1] * 100)\n all_unseen_score = model.evaluate(all_unseen_images, all_unseen_labels, batch_size=batch_size)\n all_unseen_scores_list.append(all_unseen_score[1] * 100)\n\n # show confusion matrix\n test_confusion_matrix, confusion_matrix_47, confusion_matrix_84, all_confusion_matrix = \\\n gettingKFoldConfusionMatrix(test_data, test_labels, images_47, labels_47, images_84, labels_84,\n all_unseen_images,\n all_unseen_labels, kf_counter)\n test_matrix_list.append(test_confusion_matrix)\n matrix_47_list.append(confusion_matrix_47)\n matrix_84_list.append(confusion_matrix_84)\n all_matrix_list.append(all_confusion_matrix)\n\n test_scores_mean = np.mean(test_scores_list)\n test_scores_std = np.std(test_scores_list)\n unseen_47_mean = np.mean(unseen_47_scores_list)\n unseen_47_std = np.std(unseen_47_scores_list)\n unseen_84_mean = np.mean(unseen_84_scores_list)\n 
unseen_84_std = np.std(unseen_84_scores_list)\n all_unseen_mean = np.mean(all_unseen_scores_list)\n all_unseen_std = np.std(all_unseen_scores_list)\n\n print(\"Test Scores: \" + str(test_scores_list))\n print(\"Test Scores Mean: \" + str(test_scores_mean))\n print(\"Test Scores Std: \" + str(test_scores_std))\n print(\"Unseen 47 Scores: \" + str(unseen_47_scores_list))\n print(\"Unseen 47 Scores Mean: \" + str(unseen_47_mean))\n print(\"Unseen 47 Scores Std: \" + str(unseen_47_std))\n print(\"Unseen 84 Scores: \" + str(unseen_84_scores_list))\n print(\"Unseen 84 Scores Mean: \" + str(unseen_84_mean))\n print(\"Unseen 84 Scores Std: \" + str(unseen_84_std))\n print(\"All Unseen Scores: \" + str(all_unseen_scores_list))\n print(\"All Unseen Scores Mean: \" + str(all_unseen_mean))\n print(\"All Unseen Scores Std: \" + str(all_unseen_std))\n print(\"Test Confusion Matrices: \" + str(test_matrix_list))\n print(\"47 Confusion Matrices: \" + str(matrix_47_list))\n print(\"84 Confusion Matrices: \" + str(matrix_84_list))\n print(\"All Confusion Matrices: \" + str(all_matrix_list))\n\n excel_headers.append(\"Test Scores Mean\")\n excel_dictionary.append(test_scores_mean)\n excel_headers.append(\"Test Scores Std\")\n excel_dictionary.append(test_scores_std)\n excel_headers.append(\"Unseen 47 Scores Mean\")\n excel_dictionary.append(unseen_47_mean)\n excel_headers.append(\"Unseen 47 Scores Std\")\n excel_dictionary.append(unseen_47_std)\n excel_headers.append(\"Unseen 84 Scores Mean\")\n excel_dictionary.append(unseen_84_mean)\n excel_headers.append(\"Unseen 84 Scores Std\")\n excel_dictionary.append(unseen_84_std)\n excel_headers.append(\"All Unseen Scores Mean\")\n excel_dictionary.append(all_unseen_mean)\n excel_headers.append(\"All Unseen Scores Std\")\n excel_dictionary.append(all_unseen_std)\n\n plt.plot(test_scores_list, color='red', label='Testing Scores')\n plt.plot(unseen_47_scores_list, color='blue', label='Unseen 47 Scores')\n plt.plot(unseen_84_scores_list, color='black', label='Unseen 84 Scores')\n plt.plot(all_unseen_scores_list, color='green', label='Unseen Scores')\n plt.xlabel('Folds')\n plt.ylabel('Accuracy')\n plt.legend()\n\n plt.show()", "def validation_classification(model, val_dataloader, epoch, criterion, cfg,\n writer):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n model.eval()\n\n end = time.time()\n with torch.no_grad():\n for step, data in enumerate(val_dataloader):\n data_time.update(time.time() - end)\n val_batch = data[0].cuda()\n val_label = data[1].cuda()\n outputs = model(val_batch)\n\n loss = criterion(outputs, val_label)\n if cfg.CONFIG.DATA.NUM_CLASSES < 5:\n prec1a, prec5a = accuracy(outputs.data, val_label, topk=(1, 1))\n # Tricky solution for datasets with less than 5 classes, top5 acc is always set to 100%\n prec5a = 100\n else:\n prec1a, prec5a = accuracy(outputs.data, val_label, topk=(1, 5))\n\n losses.update(loss.item(), val_batch.size(0))\n top1.update(prec1a.item(), val_batch.size(0))\n top5.update(prec5a.item(), val_batch.size(0))\n batch_time.update(time.time() - end)\n end = time.time()\n\n if step % cfg.CONFIG.LOG.DISPLAY_FREQ == 0 and cfg.DDP_CONFIG.GPU_WORLD_RANK == 0:\n logger.info('----validation----')\n print_string = 'Epoch: [{0}][{1}/{2}]'.format(\n epoch, step + 1, len(val_dataloader))\n logger.info(print_string)\n print_string = 'data_time: {data_time:.3f}, batch time: {batch_time:.3f}'.format(\n data_time=data_time.val, batch_time=batch_time.val)\n 
logger.info(print_string)\n print_string = 'loss: {loss:.5f}'.format(loss=losses.avg)\n logger.info(print_string)\n print_string = 'Top-1 accuracy: {top1_acc:.2f}%, Top-5 accuracy: {top5_acc:.2f}%'.format(\n top1_acc=top1.avg, top5_acc=top5.avg)\n logger.info(print_string)\n\n eval_path = cfg.CONFIG.LOG.EVAL_DIR\n if not os.path.exists(eval_path):\n os.makedirs(eval_path)\n\n with open(\n os.path.join(eval_path,\n \"{}.txt\".format(cfg.DDP_CONFIG.GPU_WORLD_RANK)),\n 'w') as f:\n f.write(\"{} {} {}\\n\".format(losses.avg, top1.avg, top5.avg))\n torch.distributed.barrier()\n\n loss_lst, top1_lst, top5_lst = [], [], []\n if cfg.DDP_CONFIG.GPU_WORLD_RANK == 0 and writer is not None:\n print(\"Collecting validation numbers\")\n for x in range(cfg.DDP_CONFIG.GPU_WORLD_SIZE):\n data = open(os.path.join(\n eval_path,\n \"{}.txt\".format(x))).readline().strip().split(\" \")\n data = [float(x) for x in data]\n loss_lst.append(data[0])\n top1_lst.append(data[1])\n top5_lst.append(data[2])\n print(\"Global result:\")\n print_string = 'loss: {loss:.5f}'.format(loss=np.mean(loss_lst))\n print(print_string)\n print_string = 'Top-1 accuracy: {top1_acc:.2f}%, Top-5 accuracy: {top5_acc:.2f}%'.format(\n top1_acc=np.mean(top1_lst), top5_acc=np.mean(top5_lst))\n print(print_string)\n writer.add_scalar('val_loss_epoch', np.mean(loss_lst), epoch)\n writer.add_scalar('val_top1_acc_epoch', np.mean(top1_lst), epoch)\n writer.add_scalar('val_top5_acc_epoch', np.mean(top5_lst), epoch)", "def validate(model, dataloader):\n model.eval()\n device = model.device \n epoch_start = time.time() \n running_loss = 0.0\n running_accuracy = 0.0 \n all_prob, all_labels = [], []\n\n with torch.no_grad():\n for (batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels) in dataloader:\n seqs = batch_seqs.to(device) \n masks = batch_seq_masks.to(device)\n segments = batch_seq_segments.to(device)\n labels = batch_labels.to(device)\n \n loss, logits, probabilities = model(seqs, masks, segments, labels)\n running_loss += loss.item()\n running_accuracy += correct_predictions(probabilities, labels)\n \n all_prob.extend(probabilities[:, 1].cpu().numpy())\n all_labels.extend(batch_labels)\n epoch_time = time.time() - epoch_start \n epoch_loss = running_loss / len(dataloader)\n epoch_accuracy = running_accuracy / (len(dataloader.dataset))\n # epoch_f1 = epoch_accuracy \n return epoch_time, epoch_loss, epoch_accuracy, roc_auc_score(all_labels, all_prob),", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def evaluate(self):\n self.training = False", "def validate(self):\n with torch.no_grad():\n val_loss, val_acc = self.run_epoch(self.validate_dataloader, train=False)\n self.log_performance(self.summary_writer,\n {'loss': val_loss, 'acc': val_acc},\n self.epoch,\n self.total_steps,\n summary_group='validate')\n return val_loss, val_acc", "def train(self, sess): \n\n logging.info(\"////////////////////////////\")\n logging.info(\"///// BEGIN TRAINING /////\")\n logging.info(\"////////////////////////////\")\n\n # for TensorBoard\n summaryWriter = tf.summary.FileWriter(\n \"./checkpoints/\", \n sess.graph)\n\n # Initialize iterator\n sess.run(self.train_iter.initializer)\n\n # Print initial model predictions\n emaTrainLoss = self.get_loss(sess, dSet=\"train\")\n emaTrainAccr = self.get_accuracy(sess, dSet=\"train\")\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n logging.info(\"Initial training Loss / Accuracy: %f / %f)\" % (emaTrainLoss, emaTrainAccr))\n 
logging.info(\"Initial validation Loss / Accuracy: %f / %f)\" % (valLoss, valAccr))\n\n randomRatio = 1.0\n epoch = 0\n best_val_loss = None\n best_val_acc = None\n\n\n ###### Loop over epochs #####\n while (self.FLAGS.Nepochs is 0) or (epoch <= self.FLAGS.Nepochs):\n epoch += 1\n epoch_tic = time.time()\n\n # Evaluate test and validation data\n trnLoss = self.get_loss(sess, dSet=\"train\")\n trnAccr = self.get_accuracy(sess, dSet=\"train\")\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n\n print_info = \"Full Sets\\tTraining %.5f / %.5f \\tValidation %.5f / %.5f\" %\\\n (trnLoss, trnAccr, valLoss, valAccr)\n logging.info(\"\\n\\n///// Begin Epoch {} /////\\n\".format(epoch)\n + print_info)\n\n\n # Initialize iterator\n sess.run(self.train_iter.initializer)\n\n ##### Loop over mini batches #####\n while True:\n\n # Perform training step\n try :\n tstep_tic = time.time()\n curLoss, curAccr, global_step = self.run_train_step(sess, summaryWriter)\n tstep_toc = time.time()\n tstep_time = tstep_toc - tstep_tic\n except tf.errors.OutOfRangeError:\n break\n\n # Update training history parameters\n emaTrainLoss = curLoss*(1-self.FLAGS.train_variable_decay)\\\n + emaTrainLoss*self.FLAGS.train_variable_decay \n emaTrainAccr = curAccr*(1-self.FLAGS.train_variable_decay)\\\n + emaTrainAccr*self.FLAGS.train_variable_decay \n\n ### Evaluate model ###\n if global_step % self.FLAGS.eval_every == 0:\n\n # Save training data measurements\n self.writeSummary(emaTrainLoss, \"train/loss\", summaryWriter, global_step)\n self.writeSummary(emaTrainAccr, \"train/acc\", summaryWriter, global_step)\n self.history[\"step\"].append(global_step)\n self.history[\"trainLoss\"].append(emaTrainLoss)\n self.history[\"trainAccr\"].append(emaTrainAccr)\n\n # Evaluate validation data\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n\n self.writeSummary(valLoss, \"val/loss\", summaryWriter, global_step)\n self.writeSummary(valAccr, \"val/acc\", summaryWriter, global_step)\n self.history[\"validLoss\"].append(valLoss)\n self.history[\"validAccr\"].append(valAccr)\n\n # Logging results\n print_info = \"%i\\tTraining %.5f / %.5f \\tValidation %.5f / %.5f\" %\\\n (global_step, emaTrainLoss, emaTrainAccr, valLoss, valAccr)\n logging.info(print_info)\n\n # plot training progress\n self.plot_results()\n\n\n # Save model\n if global_step % self.FLAGS.save_every == 0:\n logging.info(\"Saving model at iteration {} to {}\".format(\n global_step, self.FLAGS.checkpoint_path))\n self.saver.save(sess, \n self.FLAGS.checkpoint_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.checkpoint_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n\n\n # Evaluate validation data\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccs = self.get_accuracy(sess, dSet=\"val\")\n\n # Save best models\n if (best_val_loss is None) or (valLoss < best_val_loss):\n logging.info(\"Saving best loss model at iteration {} in {}\".format(\n global_step, self.FLAGS.bestModel_loss_ckpt_path))\n best_val_loss = valLoss\n self.bestLossSaver.save(sess, \n self.FLAGS.bestModel_loss_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.bestModel_loss_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n if (best_val_acc is None) or (valAccs > best_val_acc):\n logging.info(\"Saving best accuracy 
model at iteration {} in {}\".format(\n global_step, self.FLAGS.bestModel_acc_ckpt_path))\n best_val_acc = valAccs\n self.bestAccSaver.save(sess, \n self.FLAGS.bestModel_acc_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.bestModel_acc_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n\n\n loss_train = self.get_loss(sess, dSet=\"train\")\n acc_train = self.get_accuracy(sess, dSet=\"train\")\n\n loss_val = self.get_loss(sess, dSet=\"val\")\n acc_val = self.get_accuracy(sess, dSet=\"val\")\n\n print(loss_train, acc_train)\n if self.FLAGS.verbose:\n print(\"\\n\\n\")\n print(\"###########################\")\n print(\"##### Final Results #####\")\n print(\"###########################\")\n print(\"\\nTraining [ Loss: %f\\t Accuracy: %f]\" \\\n % (loss_train, acc_train))\n print(\"Validation [ Loss: %f\\t Accuracy: %f]\" \\\n % (loss_val, acc_val))\n \n self.hasTrained = True", "def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 0 is loss.", "def __call__(self,\n epoch: int,\n step: int,\n performance_measures: Dict,\n context: ModelTrainer,\n validation: bool = False) -> None:\n if step != len(context.train_data_loader) - 1: # only continue at end of epoch\n return\n\n if self.monitor not in performance_measures:\n return\n\n current_loss = performance_measures[self.monitor]\n if (self.last_best - current_loss) >= self.min_delta:\n self.last_best = current_loss\n self.counter = 0\n else:\n self.counter += 1\n\n if self.counter >= self.patience:\n context._stop_training = True # make ModelTrainer stop\n LOGGER.info(f\"Early stopping after epoch {epoch}\")", "def validation_step(self, val_batch, batch_idx):\n x, y = val_batch\n logits = self.forward(x)\n loss = self.cross_entropy_loss(logits, y)\n targets_hat = torch.argmax(logits, dim=1)\n n_correct_pred = torch.sum(y == targets_hat).item()\n return {\"val_loss\": loss, \"n_correct_pred\": n_correct_pred, \"n_pred\": len(x)}", "def validation_step(self, val_batch, batch_idx):\n x, y = val_batch\n logits = self.forward(x)\n loss = self.nll_loss(logits, y)\n targets_hat = torch.argmax(logits, dim=1)\n n_correct_pred = torch.sum(y == targets_hat).item()\n return {\"val_loss\": loss, \"n_correct_pred\": n_correct_pred, \"n_pred\": len(x)}", "def train(self):\n raise NotImplementedError()", "def train():\n pass", "def validate(val_loader, model, epoch, cfg):\n batch_time = meter.TimeMeter(True)\n data_time = meter.TimeMeter(True)\n prec = meter.ClassErrorMeter(topk=[1], accuracy=True)\n\n # testing mode\n model.eval()\n\n for i, (meshes, adjs, labels) in enumerate(val_loader):\n batch_time.reset()\n # bz x n x 3\n labels = labels.long().view(-1)\n\n # shift data to GPU\n if cfg.cuda:\n meshes = meshes.cuda()\n adjs = adjs.cuda()\n labels = labels.cuda()\n\n # forward, backward optimize\n preds = model(meshes, adjs)\n\n prec.add(preds.cpu().data.numpy(), labels.item())\n\n if i % cfg.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time:.3f}\\t'\n 'Epoch Time {data_time:.3f}\\t'\n 'Prec@1 
{top1:.3f}\\t'.format(\n epoch, i, len(val_loader), batch_time=batch_time.value(),\n data_time=data_time.value(), top1=prec.value(1)))\n\n print('mean class accuracy at epoch {0}: {1} '.format(epoch, prec.value(1)))\n\n return prec.value(1)", "def train(self)->None:", "def _fit(self, x_train, y_train, x_valid, y_valid, regressor_callback=None):", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n self.trainingData = trainingData\n self.trainingLabels = trainingLabels", "def train(self):\n return", "def train_with_validation_provided(self, features, labels, val_features, val_labels):\n hist = self.model.fit(\n features, labels, batch_size=self.config['training']['batch_size'],\n epochs=self.config['training']['epochs'],\n validation_data=(val_features, val_labels),\n validation_freq=self.config['training']['validation_frequency'],\n callbacks=[TensorBoard(log_dir=self.config['model']['tensorboard_dir'])])\n return hist", "def validation_summaries(self, step):\n dnn_summary_writer = self.dnn_summary_writer\n gan_summary_writer = self.gan_summary_writer\n DNN = self.DNN\n D = self.D\n train_dataset = self.train_dataset\n validation_dataset = self.validation_dataset\n\n self.evaluation_epoch(DNN, train_dataset, dnn_summary_writer, '2 Train Error')\n dnn_validation_mae = self.evaluation_epoch(DNN, validation_dataset, dnn_summary_writer, '1 Validation Error')\n self.evaluation_epoch(D, train_dataset, gan_summary_writer, '2 Train Error')\n self.evaluation_epoch(D, validation_dataset, gan_summary_writer, '1 Validation Error',\n comparison_value=dnn_validation_mae)", "def validate(val_loader, net, epoch):\n batch_time = meter.TimeMeter(True)\n data_time = meter.TimeMeter(True)\n prec = meter.ClassErrorMeter(topk=[1], accuracy=True)\n retrieval_map = meter.RetrievalMAPMeter()\n\n # testing mode\n net.eval()\n\n total_seen_class = [0 for _ in range(40)]\n total_right_class = [0 for _ in range(40)]\n\n for i, (views, dps, pcs, labels) in enumerate(val_loader):\n batch_time.reset()\n\n views = views.to(device=config.device)\n pcs = pcs.to(device=config.device)\n dps = views.to(device=config.device)\n labels = labels.to(device=config.device)\n\n f_pc, f_mv, f_dp, _, _, _, de_p, de_v, de_d, dis_p, dis_v, dis_d, cls_p, cls_v, cls_d, fts, preds = net(pcs, views, dps) # bz x C x H x W\n # prec.add(preds.data, labels.data)\n\n prec.add(preds.data, labels.data)\n retrieval_map.add(fts.detach() / torch.norm(fts.detach(), 2, 1, True), labels.detach())\n for j in range(views.size(0)):\n total_seen_class[labels.data[j]] += 1\n total_right_class[labels.data[j]] += (np.argmax(preds.data.cpu(), 1)[j] == labels.cpu()[j])\n\n if i % config.print_freq == 0:\n print(f'Epoch: [{epoch}][{i}/{len(val_loader)}]\\t'\n f'Batch Time {batch_time.value():.3f}\\t'\n f'Epoch Time {data_time.value():.3f}\\t'\n f'Prec@1 {prec.value(1):.3f}\\t')\n\n mAP = retrieval_map.mAP()\n print(f' instance accuracy at epoch {epoch}: {prec.value(1)} ')\n print(\n f' mean class accuracy at epoch {epoch}: {(np.mean(np.array(total_right_class) / np.array(total_seen_class, dtype=np.float)))} ')\n print(f' map at epoch {epoch}: {mAP} ')\n return prec.value(1), mAP", "def validate(model,val_dataloader,loss_fn):\n model.eval()\n total_loss = 0\n \n for batch_index, batch in enumerate(val_dataloader):\n batch = batch[0].view(-1,1,28,28).float()\n output_batch = model(batch)\n total_loss += loss_fn(batch, output_batch, model.prev_means, model.prev_vars)\n\n total_loss *= float(val_dataloader.batch_size) / 
len(val_dataloader.dataset)\n return total_loss", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train_model(self,model):\r\n \r\n train_state = {'stop_early': False,\r\n 'early_stopping_step': 0,\r\n 'early_stopping_best_val': 1e8,\r\n 'learning_rate': self.lr,\r\n 'epoch_index': 0,\r\n 'train_loss': [],\r\n 'val_loss': [],\r\n 'best_model':model}\r\n \r\n dataset = self.dataset\r\n loss_fn = self.loss_fn\r\n \r\n dataset.set_split('train')\r\n print(\"Training module with \"+str(len(dataset))+\" examples\")\r\n \r\n data_loader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True,\r\n drop_last=True)\r\n \r\n optimizer = optim.Adam(model.parameters(), lr=self.lr)\r\n \r\n for epoch in range(self.epochs):\r\n train_state['epoch_index'] = epoch\r\n #First step in each epoch is to train over all batches\r\n model.train()\r\n dataset.set_split('train')\r\n train_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: zero gradients\r\n optimizer.zero_grad()\r\n #Step 2: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 3: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n #Step 4: run backward\r\n loss.backward()\r\n #Step 5: update\r\n optimizer.step()\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n train_loss += new_loss\r\n \r\n train_loss /= b_i\r\n train_state['train_loss'].append(train_loss)\r\n \r\n #After training, compute loss on validation set and check for early stop\r\n model.eval()\r\n dataset.set_split('val')\r\n val_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 2: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n val_loss += new_loss\r\n \r\n val_loss /= b_i\r\n train_state['val_loss'].append(val_loss)\r\n \r\n print(\"Finished epoch \"+str(epoch+1)+\". Train loss=\"+\\\r\n str(train_loss)+\", Val loss=\"+str(val_loss))\r\n \r\n if val_loss < train_state['early_stopping_best_val']:\r\n #new best model, reset stopping counter, store model\r\n train_state['early_stopping_step'] = 0\r\n train_state['early_stopping_best_val'] = val_loss\r\n best_model = copy.deepcopy(model)\r\n best_model.load_state_dict(model.state_dict())\r\n train_state['best_model'] = best_model\r\n else:\r\n #val loss not improved; increase early stopping counter\r\n train_state['early_stopping_step'] += 1\r\n if train_state['early_stopping_step'] >= self.early_stopping_criteria:\r\n train_state['stop_early'] = True\r\n print(\"Val loss failed to improve. 
Stopping early.\")\r\n break\r\n \r\n return train_state['best_model'],train_state", "def test(self):\n self.training = False", "def _validation_loss(self) -> Tuple[float, int]:\n logger.info(\"Validating\")\n\n self.model.eval()\n\n # Replace parameter values with the shadow values from the moving averages.\n if self._moving_average is not None:\n self._moving_average.assign_average_value()\n\n if self._validation_iterator is not None:\n val_iterator = self._validation_iterator\n else:\n val_iterator = self.iterator\n\n num_gpus = len(self._cuda_devices)\n\n raw_val_generator = val_iterator(self._validation_data,\n num_epochs=1,\n shuffle=False)\n val_generator = lazy_groups_of(raw_val_generator, num_gpus)\n num_validation_batches = math.ceil(val_iterator.get_num_batches(self._validation_data)/num_gpus)\n val_generator_tqdm = Tqdm.tqdm(val_generator,\n total=num_validation_batches)\n batches_this_epoch = 0\n val_loss = 0\n for batch_group in val_generator_tqdm:\n\n output_dict = self.get_output_dict(batch_group, for_training=False)\n loss = self.get_batch_loss(output_dict, for_training=False)\n\n if loss is not None:\n # You shouldn't necessarily have to compute a loss for validation, so we allow for\n # `loss` to be None. We need to be careful, though - `batches_this_epoch` is\n # currently only used as the divisor for the loss function, so we can safely only\n # count those batches for which we actually have a loss. If this variable ever\n # gets used for something else, we might need to change things around a bit.\n batches_this_epoch += 1\n val_loss += loss.detach().cpu().numpy()\n\n # Update the description with the latest metrics\n val_metrics = training_util.get_metrics(self.model, val_loss, batches_this_epoch)\n description = training_util.description_from_metrics(val_metrics)\n val_generator_tqdm.set_description(description, refresh=False)\n\n # Now restore the original parameter values.\n if self._moving_average is not None:\n self._moving_average.restore()\n\n return val_loss, batches_this_epoch", "def train(self):\n args = self.args\n mnist = self.mnist\n feed_valid = {self.x: mnist.validation.images, self.y: mnist.validation.labels}\n feed_test = {self.x: mnist.test.images, self.y: mnist.test.labels}\n print('------------------------')\n print(\"epoch | l2_loss (v) | ce_loss (v) | valid_err (s) | valid_err (m) | test_err (s) | test_err (m)\")\n\n for ep in range(args.num_epochs):\n num_mbs = int(args.num_train / args.batch_size)\n for _ in range(num_mbs):\n batch = mnist.train.next_batch(args.batch_size)\n feed = {self.x: batch[0], self.y: batch[1]}\n self.sess.run(self.train_step, feed)\n valid_stats = self.sess.run(self.stats, feed_valid)\n test_stats = self.sess.run(self.stats, feed_test)\n\n valid_err_single = 100*(1.0-valid_stats['accuracy'])\n valid_err_model = self.eval_valid.eval(valid_stats['y_softmax'])\n test_err_single = 100*(1.0-test_stats['accuracy'])\n test_err_model = self.eval_test.eval(test_stats['y_softmax'])\n\n print(\"{:5} {:9.4f} {:9.4f} {:10.3f} {:10.3f} {:10.3f} {:10.3f}\".format(ep,\n valid_stats['l2_loss'], valid_stats['cross_entropy'],\n valid_err_single, valid_err_model,\n test_err_single, test_err_model))", "def crossValidate(self, args):\n\n ##################################\n # Read the training data\n ##################################\n if not os.path.isdir(args.annotationPath):\n print('annotation path does not exist: {}' \\\n .format(args.annotationPath))\n return -1\n\n data = self.readData(args.annotationPath)\n\n 
############################\n # Execute the K-Fold cross validation\n ############################\n\n x = []\n y = []\n l = []\n for subject, df in data.items():\n lx = df[['gradient', 'rate']].values.tolist()\n #lx = df[['rate']].values.tolist()\n ly = np.array(df[['immersion']].values.tolist()).squeeze(-1)\n x.extend(lx)\n y.extend(ly.tolist())\n l.append(len(lx))\n\n x = np.array(x)\n y = np.array(y)\n\n print('Executing cross-validation with k = {}...'.format(args.k))\n clf = StructuredPerceptron(random_state=2)\n scores = []\n folds = SequenceKFold(l, n_folds=args.k)\n for train_idx, train_len, test_idx, test_len in folds:\n xTrain = x[train_idx]\n yTrain = y[train_idx]\n clf.fit(xTrain, yTrain, train_len)\n\n xTest = x[test_idx]\n yTest = y[test_idx]\n yPred = clf.predict(xTest, test_len)\n scores.append(accuracy_score(yTest, yPred))\n\n scores = np.array(scores)\n print(scores)\n print('Result of the K-Fold CV: {:3f} (+- {:3f})' \\\n .format(scores.mean(), 2 * scores.std()))\n\n ############################\n # Execute the Leave-One-Out cross validation\n ############################\n\n\n return 0", "def run_step(self):\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If your want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If your want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss for loss in loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n \n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)\n\n \"\"\"\n If you need accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def _valid_epoch(self, epoch):\n self.model.eval()\n self.valid_metrics.reset()\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(self.valid_data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n output = self.model(data)\n loss = self.criterion(output, target)\n\n self.writer.set_step(\n (epoch - 1) * len(self.valid_data_loader) + batch_idx, \"valid\"\n )\n self.valid_metrics.update(\"loss\", loss.item())\n for met in self.metric_ftns:\n self.valid_metrics.update(met.__name__, met(output, target))\n self.writer.add_image(\n \"input\", make_grid(data.cpu(), nrow=8, normalize=True)\n )\n\n # add histogram of 
model parameters to the tensorboard\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(name, p, bins=\"auto\")\n return self.valid_metrics.result()", "def create_train_valid_set(self):\n\n if not self.eq_train:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level, self.train_weights, self.y_train,\n train_size=0.7, test_size=0.3\n )\n else:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, w_train_eq, w_valid_eq, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level,\n self.train_weights, self.train_weights_eq, self.y_train,\n train_size=0.7, test_size=0.3\n )\n self.train_weights_eq = w_train_eq\n\n #NOTE: might need to re-equalise weights in each folds as sumW_sig != sumW_bkg anymroe!\n self.train_weights = train_w\n self.valid_weights = valid_w #validation weights should never be equalised weights!\n\n print 'creating validation dataset'\n self.X_train_high_level = X_train_high_level\n self.X_train_low_level = self.join_objects(X_train_low_level)\n\n self.X_valid_high_level = X_valid_high_level\n self.X_valid_low_level = self.join_objects(X_valid_low_level)\n print 'finished creating validation dataset'\n\n self.y_train = y_train\n self.y_valid = y_valid", "def _validation(self, data_loader: torch.utils.data.DataLoader,\n data_size: int, calc_mapk: bool):\n\n self.model.eval()\n total_loss = torch.Tensor([0])\n if calc_mapk:\n users_processed = set()\n\n with tqdm(total=data_size//self.batch_size) as pbar:\n for _, ((row, col), val) in enumerate(data_loader):\n\n row = row.long()\n if isinstance(col, list):\n col = tuple(c.long() for c in col)\n else:\n col = col.long()\n\n pred = self.model(row, col)\n loss = self.loss_function(pred)\n total_loss += loss.item()\n\n if calc_mapk:\n user_lst_batch = set(row.tolist()) - users_processed\n users_processed.update(user_lst_batch)\n recommended_items = self.recommend_batch(user_lst_batch)\n d = dict(zip(user_lst_batch, recommended_items))\n self.recommended_items_dct.update(d)\n\n pbar.update(1)\n\n total_loss /= data_size\n if calc_mapk:\n test_mapk = self.get_mapk()\n return total_loss[0], test_mapk\n else:\n return total_loss[0]", "def train_model_with_validation(model, train_loader, validation_loader, criterion,\n optimizer, lr_scheduler=None, num_epochs=20):\n since = time.time()\n\n best_model = model\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n model.train(True)\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n if lr_scheduler is not None:\n lr_scheduler.step(epoch)\n\n running_loss = 0.0\n running_corrects = 0\n\n current_batch = 0\n # Iterate over data.\n for inputs, labels in train_loader:\n start_time = time.time()\n current_batch += 1\n\n # wrap them in Variable\n inputs, labels = Variable(inputs.cuda()), \\\n Variable(labels.cuda())\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n outputs = model(inputs)\n _, preds = torch.max(outputs.data, 1)\n loss = criterion(outputs, labels)\n\n # backward\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += float(loss)\n running_corrects += torch.sum(preds == labels.data)\n\n if current_batch % 250 == 0:\n curr_acc = running_corrects / (current_batch * train_loader.batch_size)\n curr_loss = running_loss / (current_batch * train_loader.batch_size)\n time_elapsed = time.time() - since\n\n 
print('Epoch Number: {}, Batch Number: {}, Loss: {:.4f}, Acc: {:.4f}'.format(\n epoch, current_batch, curr_loss, curr_acc))\n print('Time so far is {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n\n\n\n validation_acc = test_model(model, validation_loader)\n print('Epoch Number: {}, Validation Accuracy: {:.4f}'.format(epoch, validation_acc))\n\n # deep copy the model\n if validation_acc > best_acc:\n best_acc = validation_acc\n best_model = copy.deepcopy(model)\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n model.train(False)\n\n return best_model", "def train(self, train_gen, valid_gen=None, optimizer=SGD(lr=0.001, momentum=0.9, decay=0.00004, nesterov=True), classifier_dropout=0.7,\n steps_per_epoch=100, validation_steps=100, epochs=1, out_prefix='', out_period=1, fix_extractor=False):\n \n self.classifier().trainable = True\n self.extractor().trainable = not fix_extractor \n \n self.classifier().layers[1].rate = classifier_dropout\n \n train_model = Sequential([self.extractor(), self.classifier()])\n train_model.summary()\n \n train_model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])\n \n out_dir = os.path.dirname(out_prefix)\n if out_dir != '' and not os.path.exists(out_dir):\n os.mkdir(out_dir)\n \n callbacks = []\n callbacks.append(TensorBoard())\n if out_prefix is not None:\n callbacks.append(self.SaveWeightsCallback(target_models=[self.extractor(), self.classifier()], out_prefix=out_prefix, period=out_period)) \n history = train_model.fit_generator(train_gen, steps_per_epoch=steps_per_epoch, epochs=epochs+self.current_epochs, callbacks=callbacks, workers=0, validation_data=valid_gen, validation_steps=validation_steps, initial_epoch=self.current_epochs)\n self.current_epochs += epochs\n \n return history", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def val_one_epoch(self):\n # TODO: add IoU compute function\n print('Validating:')\n\n # set mode eval\n self.network.eval()\n\n # prepare data\n val_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='val', transforms = transform)\n val_loader = DataLoader(dataset,\n batch_size=params.val_batch,\n shuffle=params.shuffle,\n 
num_workers=params.dataloader_workers)\n val_size = 348\n if val_size % self.params.val_batch != 0:\n total_batch = val_size // self.params.val_batch + 1\n else:\n total_batch = val_size // self.params.val_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # validate through dataset\n for batch_idx, batch in enumerate(val_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n \n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new +accuracy_final\n loss = self.loss_fn(out, label_cuda)\n val_loss += loss.item()\n\n # record first loss\n if self.val_loss == []:\n self.val_loss.append(val_loss)\n self.summary_writer.add_scalar('loss/val_loss', val_loss, 0)\n \n print(accuracy_new/total_batch)\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n self.pb.close()\n val_loss /= total_batch\n self.val_loss.append(val_loss)\n\n # add to summary\n self.summary_writer.add_scalar('loss/val_loss', val_loss, self.epoch)", "def train(x_train, y_train, labels_train, batch_size=32, epoch=200):\n skf = KFold(n_splits=5, random_state=17, shuffle=True)\n i = 0\n model_paths = []\n for train_index, test_index in skf.split(x_train):\n x_tr_fold = x_train[train_index]\n y_tr_fold = y_train[train_index]\n x_val_fold = x_train[test_index]\n y_val_fold = y_train[test_index]\n lab_val_fold = labels_train[test_index]\n\n model = VGG_Unet_model()\n optim = Adam()\n model_paths += [\"models/fold{}.h5\".format(i)]\n model.compile(optimizer=optim, loss=full_loss, metrics=[dice_coef])\n callbacks_list = [TestCallback((x_val_fold, lab_val_fold), model_paths[-1], once_in=25)]\n\n x_val, y_val = [], []\n for x_cur, y_cur in zip(x_val_fold, y_val_fold):\n x_val.extend(get_cropped_array(x_cur))\n y_val.extend(get_cropped_array(y_cur))\n x_val = [augment_test(x)[0] for x in x_val]\n x_val = np.array(x_val)\n y_val = np.array(y_val)\n subsample_ind = np.random.choice(len(x_val), size=200, replace=False)\n\n steps_per_epoch = x_tr_fold.shape[0] // batch_size\n\n model.fit_generator(batch_generator(x_tr_fold, y_tr_fold, batch_size, augment_train),\n steps_per_epoch=steps_per_epoch,\n epochs=epoch, verbose=1, callbacks=callbacks_list, workers=6,\n validation_data=(x_val[subsample_ind], y_val[subsample_ind]))\n i += 1\n return model_paths", "def train(self, batch):\n pass", "def train_model(model, epochs, optimizer, loss_function, train_iterator, valid_iterator):\n for epoch in range(epochs):\n model.train()\n train_loss = 0.0\n train_acc = 0.0\n for i, batch in enumerate(train_iterator):\n (feature, batch_length), label = batch.overview, batch.genre\n batch_length = batch_length.to('cpu')\n label = label.float()\n optimizer.zero_grad()\n\n output = model(feature, batch_length)\n\n loss = loss_function(output, label)\n acc = model_accuracy(output, label)\n\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n train_acc += acc.item()\n print(\n f\"Train:: Epoch: {epoch}, Loss: {train_loss / len(train_iterator)}, Accuracy: {train_acc / len(train_iterator)}\")\n\n model.eval()\n val_loss = 0.0\n 
val_acc = 0.0\n for i, batch in enumerate(valid_iterator):\n (feature, batch_length), label = batch.overview, batch.genre\n batch_length = batch_length.to('cpu')\n label = label.float()\n\n output = model(feature, batch_length)\n loss = loss_function(output, label)\n acc = model_accuracy(output, label)\n\n val_loss += loss.item()\n val_acc += acc.item()\n\n print(\n f\"Validation:: Epoch: {epoch}, Loss: {val_loss / len(valid_iterator)}, Accuracy: {val_acc / len(valid_iterator)}\")\n print(\"\")", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(X_train,Y_train,X_test,Y_test,depoch=50,ftepoch=201,batch_size=32,classnum=100,out='inceptionv3-ft.model'):\n nb_train_samples = len(Y_train)\n nb_classes = classnum\n nb_val_samples = len(Y_test)\n batch_size = batch_size\n\n # data prep\n train_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n rotation_range=20,#角度\n width_shift_range=0.2,#水平偏移\n height_shift_range=0.2,#高度偏移\n shear_range=0.1,#剪切强度,逆时针方向的剪切变化角度\n zoom_range=0.2,#随机缩放的幅度\n horizontal_flip=True,#进行随机水平反转\n vertical_flip=False#进行竖直反转\n )\n test_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.1,\n zoom_range=0.2,\n horizontal_flip=True\n )\n\n train_generator = train_datagen.flow(X_train, Y_train, batch_size=batch_size, seed=42)\n\n validation_generator = test_datagen.flow(X_test, Y_test, batch_size=batch_size, seed=42)\n X_test = preprocess_input(X_test)\n # setup model\n base_model = VGG19(weights='imagenet', include_top=False,input_shape=(224,224,3)) #include_top=False excludes final FC layer\n model = add_new_last_layer(base_model, nb_classes)\n\n # transfer learning\n setup_to_transfer_learn(model, base_model)\n for i in range(depoch):\n print('Epoch: ',i)\n model.fit_generator(train_generator,epochs=1,\n steps_per_epoch = int(nb_train_samples/batch_size))\n #score, acc = model.evaluate_generator(validation_generator,int(nb_val_samples/batch_size),workers=30,max_q_size=100)\n #print('epoch: ',i,' val_acc: ',acc)\n score1, acc1 = model.evaluate(X_test, Y_test, batch_size=batch_size)\n print('epoch: ',i,'eval_acc: ',acc1)\n\n # fine-tuning\n setup_to_finetune(model)\n for i in range(ftepoch):\n print('Epoch: ',i)\n model.fit_generator(train_generator,epochs=1,\n steps_per_epoch = int(nb_train_samples/batch_size))\n #score,acc = model.evaluate_generator(validation_generator,int(nb_val_samples/batch_size),workers=30,max_q_size=100)\n #print('epoch: ',i,' val_acc: ',acc)\n score1, acc1 = model.evaluate(X_test, Y_test, batch_size=batch_size)\n print('epoch: ',i,'eval_acc: ',acc1)\n if i%10 == 0 and i !=0:\n model.save(out+str(i))\n #X_test = preprocess_input(X_test)\n score, acc = model.evaluate(X_test, Y_test, batch_size=batch_size)\n print('now accu:',acc)\n print('ALL DONE')", "def _cross_validate(self, fit_params={}):\n\n # Flatten the true labels for the training data\n y_train = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n\n if self.model.estimator_type == \"classifier\":\n\n # Get unique labels for classification\n labels = np.unique(y_train)\n\n # Set up a dictionary for the scoring metrics\n scoring = {'accuracy':'accuracy'}\n\n # Prepare arguments for the scorers\n metric_args = self.model.metric_args\n \n if 'average' in metric_args and metric_args['average'] is not None:\n # If the score is being averaged over classes a single scorer per metric is sufficient\n 
scoring['precision'] = metrics.make_scorer(metrics.precision_score, **metric_args)\n scoring['recall'] = metrics.make_scorer(metrics.recall_score, **metric_args)\n scoring['fscore'] = metrics.make_scorer(metrics.f1_score, **metric_args)\n\n output_format = \"clf_overall\"\n else:\n # If there is no averaging we will need multiple scorers; one for each class\n for label in labels:\n metric_args['pos_label'] = label\n metric_args['labels'] = [label]\n scoring['precision_'+str(label)] = metrics.make_scorer(metrics.precision_score, **metric_args)\n scoring['recall_'+str(label)] = metrics.make_scorer(metrics.recall_score, **metric_args)\n scoring['fscore_'+str(label)] = metrics.make_scorer(metrics.f1_score, **metric_args)\n \n output_format = \"clf_classes\"\n\n elif self.model.estimator_type == \"regressor\":\n scoring = ['r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_median_absolute_error', 'explained_variance']\n \n # Perform cross validation using the training data and the model pipeline\n scores = cross_validate(self.model.pipe, self.X_train, y_train, scoring=scoring, cv=self.model.cv, fit_params=fit_params, return_train_score=False)\n\n # Prepare the metrics data frame according to the output format\n if self.model.estimator_type == \"classifier\": \n # Get cross validation predictions for the confusion matrix\n y_pred = cross_val_predict(self.model.pipe, self.X_train, y_train, cv=self.model.cv, fit_params=fit_params)\n\n # Prepare the confusion matrix and add it to the model\n self._prep_confusion_matrix(y_train, y_pred, labels)\n\n # Create an empty data frame to set the structure\n metrics_df = pd.DataFrame(columns=[\"class\", \"accuracy\", \"accuracy_std\", \"precision\", \"precision_std\", \"recall\",\\\n \"recall_std\", \"fscore\", \"fscore_std\"])\n\n if output_format == \"clf_overall\": \n # Add the overall metrics to the data frame\n metrics_df.loc[0] = [\"overall\", np.average(scores[\"test_accuracy\"]), np.std(scores[\"test_accuracy\"]),\\\n np.average(scores[\"test_precision\"]), np.std(scores[\"test_precision\"]),\\\n np.average(scores[\"test_recall\"]), np.std(scores[\"test_recall\"]),\\\n np.average(scores[\"test_fscore\"]), np.std(scores[\"test_fscore\"])]\n\n elif output_format == \"clf_classes\":\n # Add accuracy which is calculated at an overall level\n metrics_df.loc[0] = [\"overall\", np.average(scores[\"test_accuracy\"]), np.std(scores[\"test_accuracy\"]),\\\n np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN]\n\n # Add the metrics for each class to the data frame\n for i, label in enumerate(labels):\n metrics_df.loc[i+1] = [label, np.NaN, np.NaN, np.average(scores[\"test_precision_\"+str(label)]),\\\n np.std(scores[\"test_precision_\"+str(label)]), np.average(scores[\"test_recall_\"+str(label)]),\\\n np.std(scores[\"test_recall_\"+str(label)]), np.average(scores[\"test_fscore_\"+str(label)]),\\\n np.std(scores[\"test_fscore_\"+str(label)])]\n \n # Finalize the structure of the result DataFrame\n metrics_df.loc[:,\"model_name\"] = self.model.name\n metrics_df = metrics_df.loc[:,[\"model_name\", \"class\", \"accuracy\", \"accuracy_std\", \"precision\", \"precision_std\", \"recall\",\\\n \"recall_std\", \"fscore\", \"fscore_std\"]]\n\n # Add the score to the model\n self.model.score = metrics_df[\"accuracy\"].values[0]\n\n elif self.model.estimator_type == \"regressor\":\n # Create an empty data frame to set the structure\n metrics_df = pd.DataFrame(columns=[\"r2_score\", \"r2_score_std\", \"mean_squared_error\", \"mean_squared_error_std\",\\\n 
\"mean_absolute_error\", \"mean_absolute_error_std\", \"median_absolute_error\", \"median_absolute_error_std\",\\\n \"explained_variance_score\", \"explained_variance_score_std\"])\n \n # Add the overall metrics to the data frame\n metrics_df.loc[0] = [np.average(scores[\"test_r2\"]), np.std(scores[\"test_r2\"]),\\\n np.average(scores[\"test_neg_mean_squared_error\"]), np.std(scores[\"test_neg_mean_squared_error\"]),\\\n np.average(scores[\"test_neg_mean_absolute_error\"]), np.std(scores[\"test_neg_mean_absolute_error\"]),\\\n np.average(scores[\"test_neg_median_absolute_error\"]), np.std(scores[\"test_neg_median_absolute_error\"]),\\\n np.average(scores[\"test_explained_variance\"]), np.std(scores[\"test_explained_variance\"])]\n \n # Finalize the structure of the result DataFrame\n metrics_df.loc[:,\"model_name\"] = self.model.name\n metrics_df = metrics_df.loc[:,[\"model_name\", \"r2_score\", \"r2_score_std\", \"mean_squared_error\", \"mean_squared_error_std\",\\\n \"mean_absolute_error\", \"mean_absolute_error_std\", \"median_absolute_error\", \"median_absolute_error_std\",\\\n \"explained_variance_score\", \"explained_variance_score_std\"]]\n\n # Add the score to the model\n self.model.score = metrics_df[\"r2_score\"].values[0]\n\n # Save the metrics_df to the model\n self.model.metrics_df = metrics_df", "def train_model_cross_validation(model, train_docs, test_docs, nb_iter, output_dir, spacy_type = True, nb_folds = 5):\n\n print(output_dir)\n os.mkdir(output_dir) # creating the output directory\n print(\" ============= TRAINING MODEL ===========================\")\n\n\n # tuple conversion (the tuple type is lost when dataframe -> excel -> dataframe)\n\n #docs['annotations'] = [[tuple(ann) for ann in annotations] for annotations in docs['annotations'].to_numpy()]\n\n\n # cross validation :\n\n models = []\n all_scores = []\n\n kf = KFold(n_splits=nb_folds)\n c = 0\n for train_index, val_index in kf.split(train_docs):\n\n train_data = train_docs.iloc[train_index, :]\n val_data = train_docs.iloc[val_index, :]\n\n # spacy_format\n TRAIN_DATA = [(text, {'entities': entities}) for [text, entities] in train_data[['text', 'annotations']].to_numpy()]\n\n # trim entities : leading whitespace make the model bug\n TRAIN_DATA = trim_entity_spans(TRAIN_DATA)\n\n # loading of the model\n nlp = model\n\n optimizer = nlp.begin_training()\n\n # get names of other pipes to disable them during training\n pipe_exceptions = [\"ner\" ] #\"trf_wordpiecer\", \"trf_tok2vec\"\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n\n scores = []\n\n # training\n with nlp.disable_pipes(*other_pipes): # only train NER\n\n if not spacy_type : # add the other labels\n ner = nlp.get_pipe(\"ner\")\n ner.add_label('AGE_RELATED')\n ner.add_label('DURATION')\n ner.add_label('FREQUENCY')\n ner.add_label('OTHER')\n\n for i in range(nb_iter):\n\n print('Iteration ', i)\n print()\n losses = {}\n random.shuffle(TRAIN_DATA) # ??\n\n path = ''\n if spacy_type:\n path = 'spacy_model_' + str(c) + '_fold'\n else:\n path = 'all_types_model_' + str(c) + '_fold'\n\n batches = minibatch(TRAIN_DATA, size=1) #compounding(4.0, 20.0, 1.001)\n\n for batch in batches:\n texts, annotations = zip(*batch)\n try:\n nlp.update(texts, annotations, sgd = optimizer, drop=0.5, losses = losses)\n print(\"Losses\", losses)\n except Exception as e:\n print(e)\n #print(text)\n\n tp_g, fp_g, fn_g, p, r, f, pt, rt, ft, type_dict = test_model(test_docs, nlp)\n scores += [(p, r, r, pt, rt, ft)]\n print()\n print()\n\n # test the 
trained model\n test_model(val_data, nlp)\n\n df_scores = pd.DataFrame(scores, columns = ['span_precision', 'span_recall', 'span_f1', 'type_precision', 'type_recall', 'type_f1'])\n df_scores.to_excel(output_dir + '/' + path + '.xlsx')\n\n\n models += [nlp]\n all_scores += [scores]\n # save model to output directory\n if output_dir is not None:\n nlp.to_disk(output_dir + '/' + path)\n print(\"Saved model to\", output_dir + '/' + path)\n\n c += 1\n\n return models, all_scores", "def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def _doValidation(self, val_dl: torch.utils.data.DataLoader) -> float:\n\n # Initialize the variable for tracking the validation loss\n val_loss = 0.0\n # Set the model to evaluation mode (disables gradient computation and dropout)\n self.eval()\n # Disable gradient tracking for efficiency\n with torch.no_grad():\n # Iterate over the validation data loader\n for x_batch, y_batch in val_dl:\n # Forward pass to obtain model predictions\n y_pred = self.forward(x_batch)\n # Compute the loss between the predictions and the ground truth\n loss = self.criterion(y_pred, y_batch)\n # Accumulate the validation loss\n val_loss += loss.item()\n\n # Compute the average validation loss\n val_loss /= len(val_dl)\n # Return the validation loss and None values for additional metrics\n return val_loss, None, None, None", "def train(x_train, y_train, x_valid, y_valid, config):\n train_acc = []\n valid_acc = []\n train_loss = []\n valid_loss = []\n best_model = None\n NUM_EPOCH = config['epochs']\n EARLY_STOP = config['early_stop']\n EARLY_STOP_EPOCH = config['early_stop_epoch']\n BATCH_SIZE = config['batch_size']\n model = NeuralNetwork(config=config)\n loss = float('inf')\n best_loss = float('inf')\n best_accuracy = 0\n patience = 0\n\n\n\n for i in range (NUM_EPOCH):\n\n x_train, y_train = shuffle(x_train, y_train)\n x_train = np.asarray(x_train)\n y_train = np.asarray(y_train)\n\n for j in range (0, len(x_train), BATCH_SIZE):\n start = j\n end = j + BATCH_SIZE\n if (end > len(x_train)):\n end = len(x_train)\n\n x = x_train[start:end]\n y = y_train[start:end]\n\n model.forward(x, y) \n model.backward()\n\n train_epoch_loss = model.forward(x_train, y_train)\n \n train_predict = np.zeros_like(model.y)\n train_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n train_accuracy = sum([1 if all(train_predict[i] == y_train[i]) else 0 for i in range(len(y_train))])/len(y_train)\n\n train_loss.append(train_epoch_loss)\n train_acc.append(train_accuracy)\n \n valid_epoch_loss = model.forward(x_valid, y_valid)\n valid_predict = np.zeros_like(model.y)\n valid_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n valid_accuracy = sum([1 if all(valid_predict[i] == y_valid[i]) else 0 for i in range(len(y_valid))])/len(y_valid)\n\n valid_loss.append(valid_epoch_loss)\n valid_acc.append(valid_accuracy)\n\n\n print(\"Epoch:\", i, \"Train Accuracy|Loss:\", train_accuracy,\"| \", train_epoch_loss, \"~|~ Valid: \", valid_accuracy, \" | \", valid_epoch_loss)\n if EARLY_STOP:\n if valid_epoch_loss > best_loss and patience >= EARLY_STOP_EPOCH:\n return train_acc, 
valid_acc, train_loss, valid_loss, best_model\n elif valid_epoch_loss > best_loss and patience < EARLY_STOP_EPOCH:\n patience += 1\n else:\n patience = 0\n if valid_epoch_loss < best_loss:\n best_loss = valid_epoch_loss\n best_accuracy = valid_accuracy\n best_model = copy.deepcopy(model)\n\n loss = valid_epoch_loss\n\n \n best_model = model \n return train_acc, valid_acc, train_loss, valid_loss, best_model" ]
[ "0.7692694", "0.75106955", "0.72990733", "0.7281097", "0.7261256", "0.7184241", "0.7167047", "0.71011925", "0.7094827", "0.7077488", "0.7063481", "0.70511085", "0.7027731", "0.69944483", "0.6994215", "0.6989954", "0.6987662", "0.6927485", "0.6925709", "0.69221634", "0.6896365", "0.6875067", "0.68743205", "0.6868029", "0.6862959", "0.68506783", "0.68506783", "0.68506783", "0.68506783", "0.68506783", "0.68462414", "0.68420696", "0.6828957", "0.6826427", "0.68232805", "0.68017995", "0.6779436", "0.6771654", "0.67699564", "0.67689466", "0.6768252", "0.6762953", "0.67569757", "0.67510223", "0.674743", "0.67441964", "0.6738242", "0.6728973", "0.6718437", "0.67149115", "0.67007", "0.670007", "0.6695769", "0.66880083", "0.66827303", "0.66825527", "0.66763246", "0.667476", "0.6674012", "0.66688794", "0.6668123", "0.6661368", "0.6660562", "0.66517717", "0.66492784", "0.6646852", "0.6640246", "0.66345316", "0.66325873", "0.6624677", "0.66171116", "0.6615503", "0.6602924", "0.65939325", "0.65886956", "0.6586192", "0.65853083", "0.6582852", "0.6573991", "0.65705997", "0.65652615", "0.65642035", "0.6563936", "0.6561819", "0.6555864", "0.65528995", "0.6538892", "0.6532816", "0.6532335", "0.6528683", "0.6517673", "0.6517338", "0.65165055", "0.6510809", "0.6509091", "0.65086687", "0.6505605", "0.650556", "0.65047336", "0.6501831" ]
0.747869
2
Train the model for one epoch.
def fit_one_epoch(self):
    preds, labels = [], []
    for batch_idx, data in tqdm(enumerate(self.primary_dataloader)):
        losses_report, train_preds, train_labels = self.forward_one_batch(data)
        preds.append(train_preds)
        labels.append(train_labels)
        self._optimize(losses_report)
        self._update_losses(losses_report, train=True)
        self.iter += 1
        # log/check point
        with torch.no_grad():
            if self.iter % self.log_iter == 0:
                # TODO: track train
                preds = np.concatenate(preds, axis=0)
                labels = np.concatenate(labels, axis=0)
                if IS_REG:
                    preds = disc(preds)
                metrics_report = self.evaluate_metrics(preds, labels)
                self._update_metrics(metrics_report, train=True)
                preds, labels = [], []
                if self.valid_dataloader:
                    self.validate()
                self.log_meters()
                self.save_checkpoint()
                self.reset_meters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_one_epoch(self):\n raise NotImplementedError", "def train_one_epoch(self):\n\t\tself.model.train()\n\t\ttrain_loss = 0\n\n\t\tfor batch_idx, data in enumerate(self.data_loader.train_loader):\n\t\t\tInput = data[0].float().to(self.device)\n\t\t\tOutput = data[1].float().to(self.device)\n\n\t\t\tself.optimizer.zero_grad()\n\t\t\tloss = self.loss(self.model(Input)[:,0],Output)\n\t\t\ttrain_loss += loss.item()\n\t\t\tloss.backward()\n\t\t\tself.optimizer.step()\n\t\t\tself.current_iteration += 1\n\n\t\tself.summary_writer.add_scalar('training/loss', loss.item(), self.current_epoch)", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def train_epoch(self):\n\n if self._train_data_set is not None and self._train_data_set is not None:\n self._model.fit_num_epochs(self._train_data_set, self._test_data_set)\n else:\n raise RuntimeError(\"[Triggerbot]: No training or test set available\")", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' 
+ str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()", "def train_one_epoch(self):\n prog_bar = tqdm(enumerate(self.train_data), total=len(self.train_data))\n self.model.train()\n with autocast():\n for idx, inputs in prog_bar:\n ids = inputs['inputs'].to(self.device, dtype=torch.long)\n mask = inputs['attention_mask'].to(self.device, dtype=torch.long)\n targets = inputs['targets'].to(self.device, dtype=torch.float)\n\n outputs = self.model(input_ids=ids, attention_mask=mask) \n\n loss = self.loss_fn(outputs.squeeze(1), targets)\n prog_bar.set_description('loss: {:.2f}'.format(loss.item()))\n\n Config.scaler.scale(loss).backward()\n Config.scaler.step(self.optimizer)\n Config.scaler.update()\n self.optimizer.zero_grad()\n self.scheduler.step()", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def set_train(self):\n self.model.train()", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def train_epoch(self, data_loader):\n raise NotImplementedError", "def train(self):\n start_time = time()\n self.model.train()\n\n for step, sample in enumerate(self.train_loader):\n self.optimizer.zero_grad()\n\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n loss.backward()\n self.train_losses.append(loss.item())\n\n self.optimizer.step(None)\n\n # print an incredible progress bar\n print(f'\\r{self.progress_bar} │ Loss: {np.mean(self.train_losses):.6f}', end='')\n self.progress_bar.inc()\n\n # log average loss of this epoch\n mean_epoch_loss = np.mean(self.train_losses)\n self.sw.add_scalar(tag='train_loss', scalar_value=mean_epoch_loss, global_step=self.epoch)\n self.train_losses = []\n\n # log epoch duration\n print(f' │ T: {time() - start_time:.2f} s')", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n\n if self.config[\"amp\"]:\n # AMP!\n with autocast():\n output = self.model(data)\n loss = self.criterion(output, target)\n else:\n output = self.model(data)\n loss = self.criterion(output, target)\n\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update(\"loss\", loss.item())\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug(\n \"Train Epoch: 
{} {} Loss: {:.6f}\".format(\n epoch, self._progress(batch_idx), loss.item()\n )\n )\n self.writer.add_image(\n \"input\", make_grid(data.cpu(), nrow=8, normalize=True)\n )\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{\"val_\" + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def train(self, batch):\n pass", "def _train_epoch(self, model, tqdm_data,\n optimizer_disc=None, optimizer_gen=None):", "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def train_epoch(self, train=False):\n # init params\n config = self.config\n writer = self.writer\n train_params = self.get_train_params()\n args = self.args\n # net, net_SP = self.net, self.net_SP\n optimizer, optimizer_SP = self.optimizer, self.optimizer_SP\n\n lr = self.get_learning_rate()\n logging.info(f\"current learning rate: {lr}\")\n\n running_losses = []\n self.save_lists = [\n \"err_q\",\n \"err_t\",\n \"epi_dists\",\n \"relative_poses_cam\",\n \"relative_poses_body\",\n ]\n dict_of_lists_in_train = init_dict_of_lists(config, self.save_lists)\n dict_of_lists_in_val = init_dict_of_lists(config, self.save_lists)\n if_val_in_train_trigger = False\n\n thd_corr = 300\n writer.add_scalar(\"training-lr\", lr, self.n_iter)\n\n # Train one epoch\n for i, sample_train in tqdm(enumerate(self.train_loader)):\n # if training\n if train:\n # eval in training script\n if (\n self.n_iter != 0\n and self.n_iter % config[\"training\"][\"val_interval_in_train\"] == 0\n ):\n if_val_in_train_trigger = True\n if if_val_in_train_trigger:\n logging.info(\n \"+++[Train]+++ Collecting training batch for %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n else:\n self.net.train()\n\n # train one batch\n (\n loss_train_out,\n dict_of_lists_in_train,\n clamp_cum,\n ) = self.train_val_batch(\n train_params,\n sample_train,\n True,\n if_val=if_val_in_train_trigger,\n dict_of_lists=dict_of_lists_in_train,\n )\n\n if if_val_in_train_trigger:\n if (\n dict_of_lists_in_train[\"count\"]\n > 
config[\"training\"][\"val_batches\"]\n ):\n dict_of_lists_in_train = self.flush_dict_of_lists(\n writer, \"training\", self.n_iter, **dict_of_lists_in_train\n )\n if_val_in_train_trigger = False\n else:\n # running_losses.append(loss_train_out)\n print(self.n_iter, \"%.8f\" % loss_train_out)\n self.n_iter += 1\n\n # if testing\n if args.eval and self.n_iter % config[\"training\"][\"val_interval\"] == 0:\n logging.info(\n \"+++[Val]+++ Validating %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n assert self.net.training == False\n for j, sample_val in tqdm(enumerate(self.val_loader)):\n # if not self.check_num_of_matches(sample, thd=thd_corr): continue\n logging.info(\"+++[Val]+++ Validating batch %d\" % (j))\n # logging.info(f\"frame_id: {sample_val['frame_ids']}\")\n loss_val_out, dict_of_lists_in_val, _ = self.train_val_batch(\n train_params, sample_val,\n False, if_val=True, dict_of_lists=dict_of_lists_in_val,\n ) ##### check: in order to align val and training\n self.n_iter_val += 1\n if config[\"training\"][\"val_batches\"] != -1 and (\n j > config[\"training\"][\"val_batches\"]\n ): ##### check: how to limit the validation\n break\n print(dict_of_lists_in_val.keys())\n\n ## save valdiation result (dict)\n if len(config[\"exps\"][\"filename\"]) > 3:\n # print(f\"dict_of_lists_in_val: {dict_of_lists_in_val}\")\n def get_dict(key_layer1, key_layer2, dict_of_lists):\n dict_of_array = {}\n for k in key_layer1:\n dict_of_array[k] = np.stack(dict_of_lists[k][key_layer2])\n return dict_of_array\n\n our_name, base_name = (\n config[\"exps\"][\"our_name\"],\n config[\"exps\"][\"base_name\"],\n )\n\n print(f'save dict_of_lists_in_val to {config[\"exps\"][\"filename\"]}')\n # save our results\n dict_of_lists = get_dict(\n self.save_lists, our_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{our_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # save base_name\n dict_of_lists = get_dict(\n self.save_lists, base_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{base_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # output then flush\n dict_of_lists_in_val = self.flush_dict_of_lists(\n writer, \"validating\", self.n_iter, **dict_of_lists_in_val\n )\n\n # epoch_loss = np.mean(np.asarray(running_losses))\n\n # training iterations\n self.epoch += 1\n if self.n_iter > config[\"training\"][\"train_iter\"]:\n break\n return 0.0, self.clamp_cum, self.n_iter, self.n_iter_val", "def train(self,\n epochs=10,\n track_every=20):\n self.model.train()\n print(\"Model put in training mode.\")\n\n for i in range(epochs):\n stop_training = False\n batch_losses = []\n for j, sample in enumerate(self.training_set):\n\n # Run single loop.\n loss = self.partial_fit(sample)\n batch_losses.append(loss)\n self.print_progress(epoch=i,\n batch=j,\n loss=loss)\n\n if j % track_every == 0 and j != 0:\n batch_loss = numpy.mean(numpy.array(batch_losses))\n val_loss, metric = self.update_validation_result(epoch=i,\n batch=j,\n loss=batch_loss)\n\n stop_training = self.estopper.check_stop_training(val_loss)\n\n if stop_training:\n break\n\n # End batch iteration.\n\n val_loss, metric = self.update_validation_result(epoch=i,\n batch=j,\n loss=batch_loss)\n\n if stop_training:\n print(\"Early stopping.\")\n torch.save(self.model, 
self.save_dir + \"model.pt\")\n print(f\"Model saved to {self.save_dir}model.pt\")\n break\n\n # End training loop.", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None", "def train_epoch(self, epoch):\n device_mapper = self.device_mapper\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, data in enumerate(self.data_loader):\n data = device_mapper.map_modules(data, non_blocking=True)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n loss = self.loss(output, data)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item(), batch_size=output.size(0))\n for met in self.metrics:\n self.train_metrics.update(met.name(), met(output, data), batch_size=output.size(0))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: %d %s Loss: %.6f', epoch, self._progress(batch_idx), loss.item())\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self.valid_epoch(epoch)\n log.update(**{'val_' + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n self.lr_scheduler.step(log[\"val_roc_auc\"])\n else:\n self.lr_scheduler.step()\n return log", "def train_epoch(self):\r\n for loader in self.loaders:\r\n if self.epoch % loader.epoch_interval == 0:\r\n self.cycle_dataset(loader)\r\n\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n print('{}th epoch train / eval done!'.format(self.epoch))", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n 
epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def train(self, epoch=50):\n # self.history = self.model.fit(self.train_images,\n # self.train_labels,\n # epochs=epoch,\n # validation_data=(self.test_images, self.test_labels))\n datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n # prepare iterator\n it_train = datagen.flow(self.train_images, self.train_labels, batch_size=64)\n # fit model\n steps = int(self.train_images.shape[0] / 64)\n self.history = self.model.fit_generator(it_train, steps_per_epoch=steps,\n epochs=epoch,\n validation_data=(self.test_images,\n self.test_labels),\n verbose=1)\n # evaluate model\n _, acc = self.model.evaluate(self.test_images, self.test_labels, verbose=0)\n LOGGER.info('> %.3f' % (acc * 100.0))\n self.summarize_diagnostics()", "def set_train_epoch(self, epoch: int):\n if hasattr(self, 'cls_head'):\n self.cls_head.set_train_epoch(epoch)", "def train_model():\n return model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels), shuffle='True')", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target_seg, target_class) in enumerate(self.data_loader):\n data, target_seg, target_class = data.to(self.device), target_seg.to(self.device), target_class.to(self.device)\n\n self.optimizer.zero_grad()\n output_seg, output_class = self.model(data)\n loss = self.criterion((output_seg, output_class), target_seg, target_class, epoch)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item())\n for met in self.metric_ftns:\n if met.__name__ == \"accuracy\":\n self.train_metrics.update(met.__name__, met(output_class, target_class))\n else:\n self.train_metrics.update(met.__name__, met(output_seg, target_seg))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n\n self._visualize_input(data.cpu())\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n return log", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n 
print('*'*100)\n \n # Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def train_one_epoch(self):\n self.model.train()\n for batch_idx, (imgs, labels) in enumerate(self.tr_loader):\n imgs, labels = imgs.to(self.device), labels.to(self.device)\n self.optimizer.zero_grad()\n\n outputs, aux_outputs = self.model(imgs).values()\n loss1 = self.criterion(outputs, labels)\n loss2 = self.criterion(aux_outputs, labels)\n self.loss = loss1 + 0.3*loss2\n\n _, preds = torch.max(outputs, 1)\n acc = preds.eq(labels.view_as(preds)).sum().item() / self.cfg.bs\n\n self.loss.backward()\n self.optimizer.step()\n \n self.summary_writer.add_scalars('scalar_group', \n { 'loss_end' : loss1.item(),\n 'loss_aux' : loss2.item(),\n 'loss_total' : self.loss.item(),\n 'accuracy' : acc},\n self.current_iteration)\n\n if batch_idx % self.cfg.log_interval == 0:\n info_1 = 'Epochs {} [{}/{} ({:.0f}%)] | Loss: {:.6f}' .format(\n self.current_epoch, \n batch_idx * len(imgs), \n len(self.tr_loader.dataset), \n 100. 
* batch_idx / len(self.tr_loader),\n self.loss.item())\n info_2 = 'Batch Accuracy : {:.2f}'.format(acc)\n self.logger.info('{} | {}'.format(info_1, info_2))\n self.save_checkpoint('{}_epoch{}_iter{}.pt'.format(\n self.cfg.exp_name,\n self.current_epoch, \n self.current_iteration)\n )\n self.current_iteration += 1", "def train(self, training_steps=10):", "def train_model(dataset):\n\n # clear the session so that we can train more than one model\n K.clear_session()\n\n # initialize the model\n model = initalizer.init_nn()\n\n # fit the model\n model.fit(dataset, epochs=40)\n\n return model", "def train_model(self):\n early_stopping = EarlyStopping(self, self.hyper.early_stopping_enabled, self.hyper.early_stopping_limit)\n loss_history_train = []\n loss_metric_train = tf.keras.metrics.Mean()\n\n x_train, next_values_train = self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train,\n self.dataset.next_values_train])\n\n x_train_val, next_values_train_val = self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train_val,\n self.dataset.next_values_train_val])\n\n for epoch in range(self.hyper.epochs):\n print(\"Epoch %d\" % (epoch,))\n\n for step, (x_batch_train, next_values_batch_train) in enumerate(zip(x_train, next_values_train)):\n self.train_step(x_batch_train, next_values_batch_train, loss_metric_train)\n\n if step % 50 == 0:\n print(\"\\tStep %d: mean loss = %.4f\" % (step, loss_metric_train.result()))\n\n loss_train_batch = loss_metric_train.result()\n loss_history_train.append(loss_train_batch)\n loss_metric_train.reset_states()\n\n self.model.save_weights(self.checkpoint_path.format(epoch=epoch))\n\n # Check early stopping criterion --> Has the loss on the validation set not decreased?\n best_epoch = early_stopping.execute(epoch, x_train_val, next_values_train_val)\n self.clean_up(early_stopping, epoch)\n\n if best_epoch > 0:\n print('Model from epoch %d was selected by early stopping.' 
% best_epoch)\n print('Training process will be stopped now.')\n\n self.save_model(best_epoch)\n\n return\n\n self.save_model(epoch=self.hyper.epochs - 1)", "def train_epoch(model, train_dataloader, optimizer, loss_fn):\n model.train()\n total_training_loss = 0\n for batch_index, batch in enumerate(train_dataloader):\n batch = batch[0].view(-1,1,28,28).float()\n output_batch = model(batch)\n loss = loss_fn(batch, output_batch, model.prev_means, model.prev_vars)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_training_loss += loss", "def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR {self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n neptune.log_metric(\"sleeping_status\", y=1)\n\n stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break", "def TrainOneStep(self):\n pass", "def train(self, num_batches: int):", "def set_train_epoch(self, epoch: int):\n self._train_epoch = epoch", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def train_model(self, *args, **kwargs):\n self.model.train(self.training, *args, **kwargs)", "def train_an_epoch(self, train_loader, epoch_id):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0.0\n\n for batch_data in train_loader:\n loss = self.train_single_batch(batch_data)\n total_loss += loss\n print(\"[Training Epoch {}], Loss {}\".format(epoch_id, loss))\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)", "def train_one_epoch(self, data_loader=None):\n if data_loader is None:\n data_loader = self.dataset.train_data_loader\n\n m = self.model\n loss_fn = self.model.calculate_loss\n\n m.train()\n losses = None\n opt = self.opt.opt\n 
if hasattr(self.opt, 'scheduler'):\n pass\n # TODO: scheduler\n # sch = self.opt.scheduler\n else:\n pass\n # sch = None\n for idx, data in tqdm(enumerate(data_loader), total=len(data_loader)):\n if type(data) is dict:\n for key, value in data.items():\n data[key] = value.to(self.device)\n opt.zero_grad()\n loss = loss_fn(data)\n loss.backward()\n opt.step()\n losses = loss.item() if losses is None else losses + loss.item()\n return losses", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(nepochs, model): \n if model == 'cnn':\n return gennet.train_cnn(nepochs, 'Resnet50')\n elif model == 'logreg':\n return gennet.train_logreg('Resnet50')", "def _train_epoch(self, epoch):\n self.model.train()\n total_loss = 0\n\n self.logger.info('Train Epoch: {}'.format(epoch))\n\n for batch_idx, (data) in enumerate(self.train_loader):\n start_it = time()\n data = data.to(self.device)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n if isinstance(output, tuple):\n loss = self.model.loss(data, *output)\n else:\n loss = self.model.loss(data, output)\n loss.backward()\n self.optimizer.step()\n\n step = epoch * len(self.train_loader) + batch_idx\n self.tb_writer.add_scalar('train/loss', loss.item(), step)\n # self.comet_writer.log_metric('loss', loss.item(), step)\n\n total_loss += loss.item()\n\n end_it = time()\n time_it = end_it - start_it\n if batch_idx % self.log_step == 0:\n self.logger.info(\n ' > [{}/{} ({:.0f}%), {:.2f}s] Loss: {:.6f} '.format(\n batch_idx * self.train_loader.batch_size + data.size(\n 0),\n len(self.train_loader.dataset),\n 100.0 * batch_idx / len(self.train_loader),\n time_it * (len(self.train_loader) - batch_idx),\n loss.item()))\n # grid = make_grid(data.cpu(), nrow=8, normalize=True)\n # self.tb_writer.add_image('input', grid, step)\n\n self.logger.info(' > Total loss: {:.6f}'.format(\n total_loss / len(self.train_loader)\n ))\n\n return total_loss / len(self.train_loader)", "def train(self, X, y):\n self.model.fit(X, y)", "def train(self, train_x, train_y, optimzer='adam'):\n self.history = self.model.fit(train_x, train_y, epochs=self.epochs, batch_size=self.batch_size,\n verbose=self.verbose, shuffle=False)", "def train_an_epoch(self, sampler, epoch_id):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0\n for batch_id in range(self.num_batch):\n (\n u,\n seq,\n time_seq,\n time_matrix,\n pos,\n neg,\n ) = sampler.next_batch() # tuples to ndarray\n batch_data = (\n np.array(u),\n np.array(seq),\n np.array(time_seq),\n np.array(time_matrix),\n np.array(pos),\n np.array(neg),\n )\n loss = self.train_single_batch(batch_data)\n # print(\n # \"loss in epoch {} iteration {}: {}\".format(epoch, step, loss.item())\n # ) # expected 0.4~0.6 after init few epochs\n total_loss += loss\n print(\"[Training Epoch {}], Loss {}\".format(epoch_id, total_loss))\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def epoch_begin(self, model):\n pass", "def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real 
limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 0 is loss.", "def train(self, dataset=None, epochs=2, verbose=1, workers=1):\n dataset = utils.prepare_dataset(dataset,\n self.config.batch_size,\n self.config.inputs,\n self.dtype,\n self.config.batch_decay)\n callbacks = [ModelCheckpoint(os.path.join(self.config.model_folder,\n '{epoch:03d}.hdf5'),\n monitor='val_loss',\n verbose=1,\n save_best_only=False,\n save_weights_only=False,\n mode='auto'),\n GeneratorCallback(self.config.test_string,\n self.config.inputs,\n self.config.generated_characters,\n self.dtype)\n ]\n for i in range(epochs):\n self.model.fit(dataset,\n initial_epoch=i,\n epochs=i + 1,\n verbose=verbose,\n use_multiprocessing=True,\n workers=workers,\n callbacks=callbacks)", "def train(self, training_data):\n pass", "def train():\n model.train()\n for batch_index, (xb, yb) in enumerate(train_dl):\n loss = loss_func(model(xb), yb)\n\n loss.backward()\n opt.step()\n opt.zero_grad()", "def train(self, train_x,train_y):\n self._model.fit(train_x,train_y,batch_size=8,epochs = self._epochSize)\n return None", "def train(self, batchSize):\n\n\t\tEpochs = 100\n\n\t\tlogFile = self.resultDir + '/' + self.modelName + '_' + str(batchSize) +'.log'\n\t\tcsv_logger = CSVLogger(logFile, append=True, separator=\"\\t\")\n\t\t\n\t\tearlyStop = EarlyStopping(monitor='val_categorical_accuracy', patience=5, mode='auto', verbose=1, restore_best_weights=True)\n\t\t\n\t\t##filePath = self.resultDir + '/' + self.modelName + '_checkPoint_best_model.hdf5'\n\t\t#### This file will include the epoch number when it gets saved.\n\t\t##repeatingFile = self.resultDir + '/' + self.modelName +'_{epoch:02d}_epoch_acc_{accVar:.2f}.hdf5'\n\t\t#### By default the every_10epochs will save the model at every 10 epochs\n\t\t##checkPoint = newCallBacks.ModelCheckpoint_every_10epochs(filePath, repeatingFile, self.x_test, self.y_test_oh , monitor='val_categorical_accuracy', verbose=1, save_best_only=True, every_10epochs=True)\n\t\t\n\t\tself.history = self.model.fit(self.x_train, self.y_train_oh, batch_size= batchSize, epochs=Epochs, verbose=1, shuffle= True, validation_data=(self.x_dev, self.y_dev_oh), callbacks=[csv_logger, earlyStop])", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def train(self):\r\n\r\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\r\n self.train_epoch(cur_epoch)\r\n self.model.global_step_assign_op.eval(session=self.sess, feed_dict={\r\n self.model.global_step_input: 
self.model.global_step_tensor.eval(self.sess) + 1})", "def train(self):\n for epoch in range(self.current_epoch, self.config.optim.epochs):\n self.current_epoch = epoch\n self.train_one_epoch()\n if epoch % self.config.optim.val_freq == 0:\n self.validate()\n if self.config.optim.auto_schedule:\n self.scheduler.step(self.current_val_loss)\n self.save_checkpoint()", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def 
train():\n pass", "def _train_epoch(self, epoch):\n\t\tstart = time.time()\n\t\tself.model.train()\n\t\tfor batch_idx, (images, labels) in enumerate(self.train_loader):\n\n\t\t\timages, labels = images.to(self.config.DEVICE), labels.to(self.config.DEVICE)\n\n\t\t\tself.optimizer.zero_grad()\n\t\t\toutput = self.model(images)\n\t\t\tloss = self.criterion(output, labels)\n\t\t\tloss.backward()\n\t\t\tself.optimizer.step()\n\n\t\t\tn_iter = (epoch - 1) * len(self.train_loader) + batch_idx + 1\n\n\t\t\tlast_layer = list(self.model.children())[-1]\n\t\t\tfor name, para in last_layer.named_parameters():\n\t\t\t\tif 'weight' in name:\n\t\t\t\t\tself.logger_setup['writer'].add_scalar('LastLayerGradients/grad_norm2_weights', para.grad.norm(), n_iter)\n\t\t\t\tif 'bias' in name:\n\t\t\t\t\tself.logger_setup['writer'].add_scalar('LastLayerGradients/grad_norm2_bias', para.grad.norm(), n_iter)\n\n\t\t\tprint('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\\tLoss: {:0.4f}\\tLR: {:0.6f}'.format(\n\t\t\t\tloss.item(),\n\t\t\t\tself.optimizer.param_groups[0]['lr'],\n\t\t\t\tepoch=epoch,\n\t\t\t\ttrained_samples=self.config.loader_params['bs']*(batch_idx + 1),\n\t\t\t\ttotal_samples=self.total_train_samples))\n\t\t\n\t\t\t#update training loss for each iteration\n\t\t\tself.logger_setup['writer'].add_scalar('Train/loss', loss.item(), n_iter)\n\t\t\t\n\t\t\tif self.config.WARM_UP and (epoch <= self.config.WARM_EPOCH):\n\t\t\t\tself.warmup_scheduler.step()\n\n\t\tfor name, param in self.model.named_parameters():\n\t\t\tlayer, attr = os.path.splitext(name)\n\t\t\tattr = attr[1:]\n\t\t\tself.logger_setup['writer'].add_histogram(\"{}/{}\".format(layer, attr), param, epoch)\n\n\t\tfinish = time.time()\n\t\tprint('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def train_on_one_batch(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n with tf.device('/gpu:0'):\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess: #config=tf.ConfigProto(log_device_placement=True)\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 2), save_model_path)\n\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()", "def train(self):\n self.training = True", "def train(model, trainloader, device, optimizer, loss_function, epoch):\n global train_losses\n model.train()\n train_iter = 0\n loss_meter = AverageMeter(\"train-avg\")\n for x, _ in trainloader:\n x = x.to(device)\n z, logdet, _, logp = model(preprocess(x))\n loss = loss_function(logp, logdet, x.size())\n\n # code for rosalinty model\n # log_p_sum, logdet, z_outs = model(preprocess(x))\n # loss = loss_function(log_p_sum, logdet, x.size())\n\n if(train_iter % 10 == 0):\n print(f\"iteration: {train_iter}, loss: {loss.item()}\", end=\"\\r\")\n \n model.zero_grad()\n loss_meter.update(loss.item())\n loss.backward()\n optimizer.step()\n train_iter += 1\n print(f\"epoch complete, mean loss: {loss_meter.avg}\")\n train_losses.append({\"epoch\": epoch, \"avg_loss\": loss_meter.avg})", "def train():\n import training\n\n # Ensure output 
directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def train_an_epoch(self, train_loader, epoch_id):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0\n kl_loss = 0\n rec_loss = 0\n # with autograd.detect_anomaly():\n for batch_id, sample in enumerate(train_loader):\n assert isinstance(sample, torch.Tensor)\n pos_u = torch.tensor(\n [triple[0] for triple in sample],\n dtype=torch.int64,\n device=self.device,\n )\n pos_i_1 = torch.tensor(\n [triple[1] for triple in sample],\n dtype=torch.int64,\n device=self.device,\n )\n pos_i_2 = torch.tensor(\n [triple[2] for triple in sample],\n dtype=torch.int64,\n device=self.device,\n )\n neg_u = torch.tensor(\n self.data.user_sampler.sample(\n self.config[\"model\"][\"n_neg\"], len(sample)\n ),\n dtype=torch.int64,\n device=self.device,\n )\n neg_i_1 = torch.tensor(\n self.data.item_sampler.sample(\n self.config[\"model\"][\"n_neg\"], len(sample)\n ),\n dtype=torch.int64,\n device=self.device,\n )\n neg_i_2 = torch.tensor(\n self.data.item_sampler.sample(\n self.config[\"model\"][\"n_neg\"], len(sample)\n ),\n dtype=torch.int64,\n device=self.device,\n )\n batch_data = (pos_u, pos_i_1, pos_i_2, neg_u, neg_i_1, neg_i_2)\n loss = self.train_single_batch(batch_data)\n total_loss += loss\n kl_loss += self.model.kl_loss\n rec_loss += self.model.rec_loss\n total_loss = total_loss / self.config[\"model\"][\"batch_size\"]\n rec_loss = rec_loss / self.config[\"model\"][\"batch_size\"]\n kl_loss = kl_loss / self.config[\"model\"][\"batch_size\"]\n print(\n \"[Training Epoch {}], log_like_loss {} kl_loss: {} alpha: {} lr: {}\".format(\n epoch_id,\n rec_loss,\n kl_loss,\n self.model.alpha,\n self.config[\"model\"][\"lr\"],\n )\n )\n self.writer.add_scalars(\n \"model/loss\",\n {\n \"total_loss\": total_loss,\n \"rec_loss\": total_loss - kl_loss,\n \"kl_loss\": kl_loss,\n },\n epoch_id,\n )", "def train_fru(model, epochs=EPOCHS):\n train(model, epochs=epochs, dataset=FRUDataset)", "def run_epoch(self):\n print(\"Training\")\n self.set_train()\n\n for batch_idx in range(0, self.num_total_batch):\n\n before_op_time = time.time()\n # Choosing the dataloader for training model\n if self.choosing_dataset_to_train_with(batch_idx):\n # Synthetic dataset\n self.syn_or_real = 'syn'\n try:\n inputs = self.syn_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the synthetic dataloader')\n self.syn_train_iter = iter(self.syn_train_loader)\n inputs = self.syn_train_iter.__next__()\n else:\n # Real dataset\n self.syn_or_real = 'real'\n try:\n inputs = self.real_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the real dataloader')\n self.real_train_iter = iter(self.real_train_loader)\n inputs = self.real_train_iter.__next__()\n\n # Move all available tensors to GPU memory\n for key, ipt in inputs.items():\n if type(key) == tuple or key == \"depth_gt\":\n 
inputs[key] = ipt.to(self.device)\n\n # log less frequently after the first 2000 steps to save time & disk space\n self.step += 1\n self.early_phase = batch_idx % self.opt.log_frequency == 0\n self.mid_phase = False and self.step % self.opt.save_frequency == 0\n self.late_phase = self.num_total_batch - 1 == batch_idx\n\n outputs, losses = {}, {}\n # Depth estimation\n outputs_d, losses_d = self.process_batch(inputs)\n outputs.update(outputs_d)\n losses.update(losses_d)\n\n # No more if else conditions, just combine all losses based on availability of gradients\n final_loss = torch.tensor(0.).to(self.device)\n for k, v in losses.items():\n if ('d_' not in k) and v.requires_grad and ('/' not in k):\n final_loss += v\n final_loss.backward()\n losses[\"loss\"] = final_loss\n\n if (batch_idx + 1) % 2 == 0:\n self.model_optimizer.step()\n self.model_optimizer.zero_grad()\n self.zero_grad()\n\n duration = time.time() - before_op_time\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n if self.early_phase or self.mid_phase or self.late_phase:\n self.log(\"train\", inputs, outputs, losses)\n self.val(\"real\")\n self.val(\"syn\")\n\n if (batch_idx + 1) % 2 == 0:\n current_lr = self.update_learning_rate(self.model_optimizer, self.opt.learning_rate)", "def TrainEpoch(ss):\n ss.StopNow = False\n curEpc = ss.TrainEnv.Epoch.Cur\n while True:\n ss.TrainTrial()\n if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:\n break\n ss.Stopped()", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def train_model(self,model):\r\n \r\n train_state = {'stop_early': False,\r\n 'early_stopping_step': 0,\r\n 'early_stopping_best_val': 1e8,\r\n 'learning_rate': self.lr,\r\n 'epoch_index': 0,\r\n 'train_loss': [],\r\n 'val_loss': [],\r\n 'best_model':model}\r\n \r\n dataset = self.dataset\r\n loss_fn = self.loss_fn\r\n \r\n dataset.set_split('train')\r\n print(\"Training module with \"+str(len(dataset))+\" examples\")\r\n \r\n data_loader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True,\r\n drop_last=True)\r\n \r\n optimizer = optim.Adam(model.parameters(), lr=self.lr)\r\n \r\n for epoch in range(self.epochs):\r\n train_state['epoch_index'] = epoch\r\n #First step in each epoch is to train over all batches\r\n model.train()\r\n dataset.set_split('train')\r\n train_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: zero gradients\r\n optimizer.zero_grad()\r\n #Step 2: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 3: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n #Step 4: run backward\r\n loss.backward()\r\n #Step 5: update\r\n optimizer.step()\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n train_loss += new_loss\r\n \r\n train_loss /= b_i\r\n train_state['train_loss'].append(train_loss)\r\n 
\r\n #After training, compute loss on validation set and check for early stop\r\n model.eval()\r\n dataset.set_split('val')\r\n val_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 2: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n val_loss += new_loss\r\n \r\n val_loss /= b_i\r\n train_state['val_loss'].append(val_loss)\r\n \r\n print(\"Finished epoch \"+str(epoch+1)+\". Train loss=\"+\\\r\n str(train_loss)+\", Val loss=\"+str(val_loss))\r\n \r\n if val_loss < train_state['early_stopping_best_val']:\r\n #new best model, reset stopping counter, store model\r\n train_state['early_stopping_step'] = 0\r\n train_state['early_stopping_best_val'] = val_loss\r\n best_model = copy.deepcopy(model)\r\n best_model.load_state_dict(model.state_dict())\r\n train_state['best_model'] = best_model\r\n else:\r\n #val loss not improved; increase early stopping counter\r\n train_state['early_stopping_step'] += 1\r\n if train_state['early_stopping_step'] >= self.early_stopping_criteria:\r\n train_state['stop_early'] = True\r\n print(\"Val loss failed to improve. Stopping early.\")\r\n break\r\n \r\n return train_state['best_model'],train_state", "def __epoch_train(self, data_loader, writer):\n self.model.train()\n for i, encode in enumerate(data_loader, 1):\n # update model\n encode = {k: v.to(self.device) for k, v in encode.items()}\n self.optimizer.zero_grad()\n loss = self.model(**encode)[0]\n if self.n_gpu > 1:\n loss = loss.mean()\n if self.args.fp16:\n with self.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n # optimizer and scheduler step\n self.optimizer.step()\n self.scheduler.step()\n # log instantaneous accuracy, loss, and learning rate\n inst_loss = loss.cpu().detach().item()\n inst_lr = self.optimizer.param_groups[0]['lr']\n writer.add_scalar('train/loss', inst_loss, self.__step)\n writer.add_scalar('train/learning_rate', inst_lr, self.__step)\n if self.__step % PROGRESS_INTERVAL == 0:\n LOGGER.info('[epoch %i] * (training step %i) loss: %.3f, lr: %0.8f'\n % (self.__epoch, self.__step, inst_loss, inst_lr))\n self.__step += 1\n # break\n if self.__step >= self.args.total_step:\n LOGGER.info('reached maximum step')\n return True\n return False", "def train(self, data_dict, label_dict):\n loaders = self.init_loaders(data_dict, label_dict)\n best_performance = 1e18\n loss_dict = self.init_loss_dict()\n performance_dict = self.init_performance_dict()\n\n for epoch in range(self.config_dict[\"num_epochs\"]):\n print(\"Epoch {}/{}\".format(epoch, self.config_dict[\"num_epochs\"] - 1))\n print(\"-\" * 10)\n\n if self.scheduler is not None:\n self.scheduler.step()\n\n for phase in [\"train\", \"val\"]:\n self.model.train(phase == \"train\")\n running_loss_dict = self.init_running_loss_dict(\n list(loss_dict[phase].keys())\n )\n output_dict = self.init_output_dict()\n i = 0\n for the_data in loaders[phase]:\n i += 1\n batch_loss_dict = {}\n inputs, labels = self.transform_batch(the_data)\n\n # zero parameter gradients\n self.optimizer.zero_grad()\n\n # forward\n outputs = self.model(inputs)\n\n output_dict = self.update_output_dict(output_dict, outputs, labels)\n\n batch_loss_dict[\"loss\"] = self.criterion(outputs, labels)\n if phase == \"train\":\n batch_loss_dict[\"loss\"].backward()\n self.optimizer.step()\n\n for key in batch_loss_dict.keys():\n running_loss_dict[key] 
+= batch_loss_dict[key].item()\n\n # Compute epoch losses and update loss dict\n epoch_loss_dict = {\n key: running_loss_dict[key] / i for key in running_loss_dict.keys()\n }\n loss_dict[phase] = self.update_metric_dict(\n loss_dict[phase], epoch_loss_dict\n )\n\n # Compute epoch performance and update performance dict\n epoch_statistics = self.compute_epoch_statistics(output_dict)\n performance_dict[phase] = self.update_metric_dict(\n performance_dict[phase], epoch_statistics\n )\n\n print(\"Phase: {}:\".format(phase))\n self.print_metric_dict(epoch_loss_dict)\n self.print_metric_dict(epoch_statistics)\n\n if phase == \"val\":\n best_model_condition = epoch_loss_dict[\"loss\"] < best_performance\n if best_model_condition:\n print(\"Best model updated\")\n best_performance = epoch_loss_dict[\"loss\"]\n best_model_wts = copy.deepcopy(self.model.state_dict())\n\n print(\"Best val performance: {:4f}\".format(best_performance))\n self.model.load_state_dict(best_model_wts)\n result_dict = {\n phase: {**performance_dict[phase], **loss_dict[phase]}\n for phase in performance_dict.keys()\n }\n return result_dict", "def train_model(model, train_data, train_targets, epochs):\n history = model.fit(train_data, train_targets, epochs=epochs, \n batch_size=40, validation_split=0.15,verbose=False)\n \n return history", "def start_training(self):\n i = 0\n for _ in range(self.train_steps):\n print(f\"Start Training Step {i + 1}\")\n self.model.learn(total_timesteps=self.total_time_steps)\n self.model.save(self.save_path)\n print(f\"Finished Training Step {i + 1}\")\n i += 1", "def train(self, data):\n \n logger('[.] Training with whole dataset ...')\n \n datalist = self.unpack_data(data)\n self.knn_model.fit(datatuple['features'], datatuple['labels'])", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)" ]
[ "0.84007376", "0.82546407", "0.7960908", "0.795539", "0.78864366", "0.7852404", "0.7852404", "0.7852404", "0.7852404", "0.7800227", "0.7787899", "0.7712295", "0.7680872", "0.76052165", "0.75889987", "0.75747776", "0.7558251", "0.7551875", "0.7530717", "0.75160545", "0.751391", "0.7510772", "0.7491973", "0.7482354", "0.7437734", "0.7436635", "0.7390493", "0.7368796", "0.7365185", "0.7364916", "0.7337105", "0.7336592", "0.7334481", "0.7308753", "0.73015386", "0.7299462", "0.729744", "0.72870404", "0.7286251", "0.72855055", "0.7282396", "0.7259324", "0.7252568", "0.7252568", "0.72497773", "0.72445023", "0.7230956", "0.7229756", "0.7222255", "0.722057", "0.7201675", "0.7193988", "0.719204", "0.71858406", "0.718201", "0.7180051", "0.717624", "0.7174599", "0.7163891", "0.71616", "0.7152758", "0.71507454", "0.71282995", "0.7123181", "0.7112253", "0.7111674", "0.7111674", "0.7111674", "0.7111674", "0.7111674", "0.71069497", "0.7100292", "0.70960987", "0.7095062", "0.70872563", "0.7080721", "0.7077828", "0.70659417", "0.70638645", "0.7050459", "0.7050278", "0.70491654", "0.70430356", "0.7041087", "0.7032256", "0.7022008", "0.7015502", "0.7011454", "0.7008452", "0.7008049", "0.7003323", "0.70013815", "0.70008475", "0.699931", "0.69961977", "0.6989534", "0.6989247", "0.6988947", "0.69750804", "0.6969881" ]
0.71643716
58
Forward pass one batch of the data with the model.
def forward_one_batch(self, data, inference=False):
    inputs = data['img']
    labels = data.get('label', None)
    inputs = inputs.cuda()
    outputs = self.model(inputs)

    losses_report = None
    if not inference:
        labels = labels.cuda()
        losses_report = self.compute_losses(outputs, labels)

    return losses_report, outputs.detach().cpu().numpy(), labels.detach().cpu().numpy() if labels is not None else labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward_batch(self,batcher, phase=0):\n pass", "def forward(self, x):\n #batch_size = x.shape[0]\n out = self.model(x)\n return out", "def forward_once(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 512) # reshpe it into (batch_size, feature_dimention)\n return x", "def forward_batch(model, batch, device):\n inputs, labels = (batch, batch)\n inputs, labels = inputs.to(device), labels.to(device)\n\n outputs = model(inputs, labels=labels)\n\n return outputs[:2]", "def forward(self, batch):\n self.output = np.dot(np.array(batch), self.weights) + self.biases", "def forward_train(self, *args, **kwargs):\n pass", "def forward(self, x):\n #delete all cts\n #self.cts = [self.cts[-1]]\n \n #forward\n length, batch, dim = x.shape\n res = []\n res.append(self.one_step(x[0], torch.zeros((batch, self.latent_size), dtype=torch.float)))\n\n for i in range(1,length):\n res.append(self.one_step(x[i], res[i-1]))\n\n return torch.stack(res)", "def on_iter_forward(self, runner):\n # unpack features into features and targets\n *features, target = runner.batch\n # Forward features\n runner.output = runner.model(*features)\n # Ensure `targetL` and `outputL` are always in a list format.\n targetL = [target] if not isinstance(target, (list, tuple)) else target\n outputL = [runner.output] if not isinstance(runner.output, (list, tuple)) else runner.output\n # Compute loss\n runner.loss = runner.criterion(*outputL, *targetL)\n runner.target = target", "def forward(self, x):\n\n out = self.model(x)\n\n return out", "def on_train_forward(self, runner):\n self.on_iter_forward(runner)", "def train(self, batch):\n pass", "def forward(self, x):\n length, batch, dim = x.shape\n res = []\n res.append(self.one_step(x[0], torch.zeros((batch, self.latent_size), dtype=torch.float)))\n\n for i in range(1,length):\n res.append(self.one_step(x[i], res[i-1]))\n\n return torch.stack(res)", "def forward(self, x):\n length, batch, dim = x.shape\n res = []\n res.append(self.one_step(x[0], torch.zeros((batch, self.latent_size), dtype=torch.float)))\n\n for i in range(1,length):\n res.append(self.one_step(x[i], res[i-1]))\n\n return torch.stack(res)", "def forward(self, x):\n length, batch, dim = x.shape\n res = []\n res.append(self.one_step(x[0], torch.zeros((batch, self.latent_size), dtype=torch.float)))\n\n for i in range(1,length):\n res.append(self.one_step(x[i], res[i-1]))\n\n return torch.stack(res)", "def train_on_batch(model,\n\t\t\t batch_of_x,\n\t\t\t batch_of_y,\n\t\t\t optimizer):\n model.zero_grad()\n\n loss = model.loss(batch_of_x, batch_of_y)\n\n loss.backward()\n\n optimizer.step()\n\n return", "def forward(self, X, training=False):\n pass", "def train_single_batch(self, batch_data):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.optimizer.zero_grad()\n norm_adj = self.norm_adj\n ua_embeddings, ia_embeddings = self.model.forward(norm_adj)\n\n batch_users, pos_items, neg_items = batch_data\n\n u_g_embeddings = ua_embeddings[batch_users]\n pos_i_g_embeddings = ia_embeddings[pos_items]\n neg_i_g_embeddings = ia_embeddings[neg_items]\n\n batch_mf_loss, batch_reg_loss = self.loss_comput(\n u_g_embeddings,\n pos_i_g_embeddings,\n neg_i_g_embeddings,\n batch_users,\n pos_items,\n neg_items,\n )\n\n batch_loss = batch_mf_loss + batch_reg_loss\n\n batch_loss.backward()\n self.optimizer.step()\n loss = batch_loss.item()\n return loss", "def forward(self, batch):\n # 
Convolutional layers\n batch = self.conv1(batch)\n batch = F.relu(batch)\n batch = self.pool(batch)\n batch = self.conv2(batch)\n batch = F.relu(batch)\n batch = self.pool(batch)\n # Flatten\n batch = batch.reshape(batch.shape[0], -1)\n # Fully connected layers\n batch = self.fc1(batch)\n batch = self.dropout(batch)\n batch = self.fc2(batch)\n batch = torch.sigmoid(batch)\n return batch", "def forward(self, data, input):\n output = scatter_add(input, data.batch, dim=0, dim_size=data.num_graphs)\n return output", "def forward(self, x):\n previous_batch, current_batch = x\n previous_batch_pc, previous_batch_f = previous_batch[0], previous_batch[1]\n current_batch_pc, current_batch_f = current_batch[0], current_batch[1]\n\n f1 = previous_batch_pc[:, :, 3:]\n pc1 = previous_batch_pc[:, :, :3]\n\n f2 = current_batch_pc[:, :, 3:]\n pc2 = current_batch_pc[:, :, :3]\n\n batch_size, n_points_prev, _ = previous_batch_pc.shape\n batch_size, n_points_cur, _ = current_batch_pc.shape\n\n # All outputs of the following layers are tuples of (pos, features)\n # --- Point Feature Part ---\n pf_prev_1, pf_prev_2, pf_prev_3 = self._point_feature_net(pc1.float(), f1.float())\n pf_curr_1, pf_curr_2, pf_curr_3 = self._point_feature_net(pc2.float(), f2.float())\n\n # --- Flow Embedding / Point Mixture Part ---\n _, fe_2, fe_3 = self._point_mixture(x1=pf_prev_3, x2=pf_curr_3)\n\n # --- Flow Refinement Part ---\n x = self._flow_refinement(pf_curr_1=pf_curr_1, pf_curr_2=pf_curr_2, pf_curr_3=pf_curr_3, fe_2=fe_2, fe_3=fe_3)\n\n # --- Final fully connected layer ---\n pos, features = x\n features = features.transpose(1, 2)\n x = self._fc(features)\n return x", "def _forward(self,\n batch_inputs: dict,\n batch_data_samples: OptSampleList = None) -> Tensor:\n pass", "def forward(self, inputs):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self, batch: torch.Tensor) -> torch.Tensor:\n x = self.conv1(batch)\n x = self.bn1(x)\n x = self.prelu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.bn2(x)\n x = self.dropout(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n x = self.features(x)\n\n if self.remove_bad_faces:\n # Remove bad quality faces, setting them to NaN\n x[torch.norm(x, dim=1) < self.magnitude_threshold] = float(\"nan\")\n\n return x", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def forward_batch(self,batcher,phase=0):\n mapped_results={}\n inputs=batcher.get_batched_input(mapper=self)\n for type_ in inputs.keys():\n mapper = self.mappers[type_]\n\n mapped_results[type_] = mapper.forward_batch(inputs[type_],phase=0)\n return mapped_results", "def prepare_next_batch(self) -> None:\n if not (\n self._forward_succesful and self._clip_succesful and self._noise_succesful\n ):\n raise RuntimeError(\n \"An error occured during model training. 
The model.prepare_next_batch() \"\n \" method must be called after model.forward(), model.clip_and_accumulate() \"\n \" and model.noise_gradient().\"\n )\n for model in self.models:\n for target_param, source_param in zip(\n model.parameters(), self.wrapped_model.parameters()\n ):\n target_param.data = source_param.data\n self._steps_taken += 1\n self._forward_succesful = self._clip_succesful = self._noise_succesful = False\n if self.watchdog:\n self.watchdog.inform(self._steps_taken)", "def forward(self, *args, mode=\"train\", **kwargs):\n raise NotImplementedError", "def forward_step(self, batch):\n input_ids = torch.as_tensor(batch.input_ids).to(self.device).reshape((1, -1)) # batch.get('input_ids').to(self.device)\n attention_mask = torch.as_tensor(batch.attention_mask).to(self.device).reshape((1, -1)) # batch.get('attention_mask').to(self.device)\n outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)[0]\n\n _, _, num_label = outputs.shape\n \"\"\"\n outputs : (batch, seq_length, feat_dim) => (seq_length, feat_dim)\n labels : (batch, seq_length) => (seq_length,)\n \"\"\"\n outputs = outputs.view(-1, num_label)\n labels = torch.argmax(outputs, dim=1) # torch.argmax(outputs, dim=1)\n batch_losses = self.criterion(outputs, labels)\n loss = torch.mean(batch_losses) # mean average\n self.batch_output = [input_ids, outputs]\n return loss", "def forward(self, X, batch_size):\n\n z = self.neural_net_forward(X.view(-1, self.n_hos * self.n_types)) # [batch_size, n_structures]\n\n x_1 = self.linear_program_forward(X, z, batch_size)\n\n return x_1", "def model_forward_pass(self, data):\n for key, value in data.items():\n data[key] = value.to(self.device)\n \n if self.fp16:\n with torch.cuda.amp.autocast():\n output, loss = self.model(**data)\n loss = loss / self.accumulate_grad_steps\n else:\n output, loss = self.model(**data)\n loss = loss / self.accumulate_grad_steps\n\n return output, loss", "def train_single_batch(self, batch_data, ratings=None):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.optimizer.zero_grad()\n loss = self.model.forward(batch_data)\n loss.backward()\n self.optimizer.step()\n loss = loss.item()\n return loss", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward(self, data, input):\n output = scatter_mean(input, data.batch, dim=0, dim_size=data.num_graphs)\n return output", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self,bottom,top):\n # imgmaps = self.batch_loader.mixup_gen()\n # print(\"DataLayer forward!!\")\n trainX, trainY = self.batch_loader.batch_imgs()\n # print(\"trainX:\",trainX.shape)\n # print(\"trainY:\",trainY.shape)\n # print(\"trainY:\", trainY)\n # 
print(\"top[0].data.shape:\",top[0].data.shape)\n # print(\"top[1].data.shape:\", top[1].data.shape)\n top[0].data[:, ...] = trainX\n top[1].data[:, ...] = trainY\n # print(\"DataLayer forward!!\")", "def forward(self, input):\n raise NotImplementedError()", "def forward(self, x: Tensor) -> Any: # type: ignore[override]\n return self.model(x)", "def forward(self, input):\n batch_size = input.size(0)\n shape = (batch_size, *self.shape)\n out = input.reshape(shape)\n return out", "def forward_batch(self, *args, batchsize=16, retain_inputs=False,\n calc_score=False, converter=concat_examples):\n # data may be \"train_x array\" or \"chainer dataset\"\n data = args[0]\n data, _ = self._check_X_y(data)\n\n input_list = None\n output_list = None\n total_score = 0\n for i in range(0, len(data), batchsize):\n inputs = converter(data[i:i + batchsize], device=self.device)\n if not isinstance(inputs, tuple):\n inputs = (inputs,)\n #print('forward batch inputs', len(inputs), inputs)\n #print('forward batch inputs', len(inputs[0]))\n outputs = self._forward(*inputs, calc_score=calc_score)\n if not isinstance(outputs, tuple):\n outputs = (outputs,)\n # Init\n if retain_inputs:\n if input_list is None:\n input_list = [[] for _ in range(len(inputs))]\n for j, input in enumerate(inputs):\n input_list[j].append(cuda.to_cpu(input))\n if output_list is None:\n output_list = [[] for _ in range(len(outputs))]\n for j, output in enumerate(outputs):\n # print(j, 'output', type(output), output.shape)\n output_list[j].append(cuda.to_cpu(output.data))\n if calc_score:\n # switch accuracy or loss depends on situation.\n if self.compute_accuracy:\n total_score += self.accuracy * outputs[0].shape[0]\n else:\n total_score += self.loss * outputs[0].shape[0]\n\n if retain_inputs:\n self.inputs = [numpy.concatenate(input) for input in input_list]\n if calc_score:\n self.total_score = cuda.to_cpu(total_score.data) / len(data)\n\n result = [numpy.concatenate(output) for output in output_list]\n if len(result) == 1:\n return result[0]\n else:\n return result", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n return out.view(-1, 1).squeeze(1)", "def predict_batch(self, model, context, data=None):\n pass", "def train_next_batch(self, batch_size=None):", "def handle_batch(self, batch: Mapping[str, Any]) -> None:\n self.batch = {**batch, **self.forward(batch)}", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)", "def forward(self, *inputs):\n raise NotImplementedError", "def forward(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:\n output = self._process_input(batch, **kwargs)\n output = self._process_output(output)\n return output", "def forward_once(self, x):\n output = self.cnn1(x)\n output = output.view(output.size()[0], -1)\n output = self.fc1(output)\n return output", "def forward(self, X):\n return self.sample_predict(X, 1)", "def _batch_iter(self, source, target, i: int):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n # the result and loss\n result = self.model(source)\n loss = self.criterion(result, target)\n\n # optimization and backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # update the loss\n self.epoch_loss.update(loss.item(), source.size(0))\n\n # print the information\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | 
Batch: { i } | loss: { self.epoch_loss.avg }\", end=\"\")\n\n # clean the data\n del source, target\n\n return result", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n\n return out.view(-1)", "def forward_backward(self, data_batch):\n total_feature, total_label = self.forward(data_batch, is_train=True)\n self.backward_all(total_feature, total_label)", "def forward(self, obs):\n raise NotImplementedError", "def forward(self, observation: Tensor) -> Tensor:\n pass", "def batch_forward(model, images, max_batch=MAX_BATCH):\n N = images.shape[0]\n nbatchs = ceil(N / max_batch)\n pred_list = []\n\n with torch.no_grad():\n for i in range(nbatchs):\n pred_list.append(model(images[i * max_batch: (i + 1) * max_batch]))\n return torch.cat(pred_list, dim=0)", "def forward(self, x):\n if x.dim() == 1:\n x = torch.unsqueeze(x, 0)\n return self.net(x)", "def forward(self, input_ids):\n # pdb.set_trace()\n batch_size, binary_max_size, d_model = input_ids.shape\n\n inserted = torch.clone(self.inserted_vector)\n inserted = inserted.expand(batch_size, 1, -1)\n\n batch = torch.cat((inserted, input_ids), dim=1)\n\n batch = batch.permute(1, 0, 2).contiguous()\n # tmp `(binary_max_size + 1, batch_size, d_model)`\n tmp = self.transformer(batch)\n tmp = tmp.permute(1, 0, 2).contiguous()\n\n return self.top_headlayer(tmp[:, 0, :])", "def __feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def forward(self, x):\n x = self.sparseModel(x)\n x = self.linear(x)\n return x", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def forward(self, obs):\n\t\tpass", "def _forward(self,\n batch_inputs_dict: dict,\n data_samples: OptSampleList = None,\n **kwargs) -> Tuple[List[torch.Tensor]]:\n x = self.extract_feat(batch_inputs_dict)\n results = self.bbox_head.forward(x)\n return results", "def forward(self, data: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n ###############################\n # Implement forward pass here #\n ###############################\n pass", "def __feedforward(self, X):\n A = X\n for layer in self.layers:\n layer._Dense__forward(A)\n A = layer.A\n return A", "def forward(self, inputs):\n if Dropout.train_flag:\n binomial = bn.Binomial(probs=self.p)\n self.data = binomial.sample(inputs.size())\n return inputs * self.data * (1.0/(self.p))\n return inputs", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def benchmark_synth_forward_batch1(self):\n params = self._shared_params()._replace(batch_size=1)\n self._run_benchmark(params)", "def feed_forward(self, inputs):\n raise NotImplementedError()", "def forward_train(self, *args, **kwargs):\n raise NotImplementedError('This interface should not be used in current training schedule. 
Please use `train_step` for training.')", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output", "def infer_batch(self, input_seq, logger):\r\n return self.forward(input_seq, None)", "def forward(self, x):\n x_shape = x.data.shape\n bias_array= np.zeros(x_shape, dtype=np.float32)\n for i in range(self.bias.data.shape[0]):\n bias_array[:,i,:,:]= self.bias.data[i]\n bias_tensor = from_numpy(bias_array).cuda()\n x.data +=bias_tensor\n return x", "def forward(self, x, y=None):\n output = self.model(x, y)\n return output", "def single_step(self):\n # Make a minibatch of training data by choosing \"batch_size\" elements with replacement\n num_train = self.X_train.shape[0]\n batch_mask = np.random.choice(num_train, self.batch_size) # random choice with replacement\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.latest_loss = loss\n\n # Perform a parameter update based on chosen optimiser\n for p, w in self.model.params.items():\n dw = grads[p] # current gradients\n config = self.optim_configs[p] # moments of gradients and learning rate till previous accuracy() call\n next_w, next_config = optimiser_type(self.optim_type, w, dw, config) # sent to choice of optimising technique\n self.model.params[p] = next_w # model params updated\n self.optim_configs[p] = next_config # # moments of gradients updated", "def forward(self, x):\n pass", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def _local_train(self, dataloader_with_memory, num_updates):\n # Local train\n _size = len(dataloader_with_memory)\n self.model = self.model.train()\n for _batch in range(num_updates):\n X, y = dataloader_with_memory.get_samples()\n X, y = X.to(self._device), y.to(self._device)\n if _batch == 0:\n # Initialize the batch-size using the first batch to avoid\n # edge cases with drop_last=False\n _batch_size = X.shape[0]\n _num_batches_per_epoch = (_size // _batch_size) + int(\n (_size % _batch_size) != 0\n )\n # Compute prediction and loss\n _pred = self.model(X)\n _loss = self._loss(_pred, y)\n\n # Backpropagation\n _loss.backward()\n self._optimizer.step()\n self._optimizer.zero_grad()\n self.num_batches_seen += 1\n _loss, _current_epoch = (\n _loss.item(),\n self.num_batches_seen // _num_batches_per_epoch,\n )\n\n if self.log:\n if _batch % self.log_period == 0:\n print(\n f\"loss: {_loss:>7f} after {self.num_batches_seen:>5d}\"\n f\" batches of data amounting to {_current_epoch:>5d}\"\n \" epochs.\"\n )\n self.writer.add_scalar(\n f\"client{self.client_id}/train/Loss\",\n _loss,\n self.num_batches_seen,\n )\n\n if _current_epoch > self.current_epoch:\n # At each epoch we look at the histograms of all the\n # network's parameters\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(\n f\"client{self.client_id}/{name}\", p, _current_epoch\n )\n\n self.current_epoch = _current_epoch", 
"def forward(self, batch):\n # Apply first convolution, followed by ReLU non-linearity; \n # use batch-normalization on its outputs\n batch = func.relu(self.conv1(self.conv1_normed(batch)))\n batch = func.relu(self.one1(self.one1_normed(batch)))\n \n # Apply conv2 and conv3 similarly\n batch = func.relu(self.conv2(self.conv2_normed(batch)))\n batch = func.relu(self.one2(self.one2_normed(batch)))\n batch = func.relu(self.conv3(self.conv3_normed(batch)))\n batch = func.relu(self.one3(self.one3_normed(batch)))\n \n \n # Pass the output of conv3 to the pooling layer\n batch = self.pool(batch)\n\n # Reshape the output of the conv3 to pass to fully-connected layer\n batch = batch.view(-1, self.num_flat_features(batch))\n \n # Connect the reshaped features of the pooled conv3 to fc1\n batch = func.relu(self.fc1(batch))\n \n # Connect fc1 to fc2 - this layer is slightly different than the rest (why?)\n batch = self.fc2(batch)\n\n\n # Return the class predictions\n #TODO: apply an activition function to 'batch'\n return func.sigmoid(batch)", "def forward(self, input_):\n if isinstance(input_, list) or isinstance(input_, tuple):\n return self.forward_batched_3d(input_)\n else:\n return self.forward_batched_2d(input_)", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def forward(self, x):\n x = x.float()\n n, c, t, v, m = x.size()\n x = x.permute(0, 4, 3, 1, 2).contiguous()\n x = x.view(n * m, v * c, t)\n x = self.data_bn(x)\n x = x.view(n, m, v, c, t)\n x = x.permute(0, 1, 3, 4, 2).contiguous()\n x = x.view(n * m, c, t, v)\n for gcn in self.agcn_networks:\n x = gcn(x)\n return x", "def forward(self, x, **kwargs):\n pass", "def forward(self, X):\r\n # input layer\r\n self.ff[0] = X\r\n # hidden layer\r\n for x in range(1, np.shape(self.ff)[0]-1):\r\n self.ff[x] = self.hid_transfer(self.weights[x-1].dot(self.ff[x-1]) + self.bias[x-1])\r\n # output layer\r\n self.ff[-1] = self.out_transfer(self.weights[-1].dot(self.ff[-2]) + self.bias[-1])", "def forward(self, input):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n assert input.shape[1] == self.n_neurons, \"The shape of the input tensor is not correct.\"\n\n bn_fct = CustomBatchNormManualFunction()\n out = bn_fct.apply(input, self.gamma, self.beta, self.eps)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def on_batch(self, x, y):", "def forward(self, x):\n x = self.features(x)\n return x", "def forward(self, input_):\n data = self.seq(input_)\n return data", "def forward(self, *inputs) -> torch.Tensor:\n return self.model(*inputs)", "def train_batch(self, data, num_iteration, verbose=False):\n self.train(data, num_iteration, random_order=False, verbose=verbose)", "def __train_batch(self, x, y):\n self.reset()\n\n for index, batch in enumerate(x):\n self.predict(batch, dropout_probability=self.dropout_probability)\n self.out_layer.loss(y[index])\n\n # increment hit rate if, well, hit\n if m.get_max_index(self.out_layer.predicted) == m.get_max_index(y[index]):\n self.hit_count += 1.0\n\n # calculate batch loss\n self.batch_loss += (self.out_layer.cost / len(x))\n\n # calculate all delta\n self.out_layer.calculate_delta()\n\n # update weights\n self.in_layer.update(momentum_parameter=self.momentum_parameter)", "def forward_pass(self):\n # Compute the support set's mean and var and use these as the moments for\n # batch norm on the query set.\n train_embeddings = self.embedding_fn(self.episode.train_images,\n 
self.is_training)\n self.train_embeddings = train_embeddings['embeddings']\n support_set_moments = None\n if not self.transductive_batch_norm:\n support_set_moments = train_embeddings['moments']\n test_embeddings = self.embedding_fn(\n self.episode.test_images,\n self.is_training,\n moments=support_set_moments,\n backprop_through_moments=self.backprop_through_moments)\n self.test_embeddings = test_embeddings['embeddings']", "def forward(self, x, feature_matching=False):\n x = x.view(-1, x.size(1), 1, 1)\n x = self.model(x)\n return self.output(x)", "def forward_pass_single_batch(self, inputs, model = None, return_hiddens = False, linear_output = False):\n if model == None:\n model = self.model\n num_observations = inputs.size\n hiddens = model.weights['visible_hidden'][(inputs),:]\n hiddens[:1,:] += self.weight_matrix_multiply(model.init_hiddens, model.weights['hidden_hidden'], model.bias['hidden'])\n# np.clip(hiddens[0, :], a_min = 0.0, out = hiddens[0, :])\n expit(hiddens[0,:], hiddens[0,:])\n \n for time_step in range(1, num_observations):\n hiddens[time_step:time_step+1,:] += self.weight_matrix_multiply(hiddens[time_step-1:time_step,:], model.weights['hidden_hidden'], model.bias['hidden'])\n# np.clip(hiddens[time_step, :], a_min = 0.0, out = hiddens[time_step, :])\n expit(hiddens[time_step,:], hiddens[time_step,:]) #sigmoid\n \n if 'visible_output' in model.weights:\n outputs = self.forward_layer(hiddens, model.weights['hidden_output'], model.bias['output'], model.weight_type['hidden_output'],\n model.weights['visible_output'])\n else:\n outputs = self.forward_layer(hiddens, model.weights['hidden_output'], model.bias['output'], model.weight_type['hidden_output'])\n \n if return_hiddens:\n return outputs, hiddens\n else:\n del hiddens\n return outputs", "def forward(self, x):\n batch_size = x.shape[0]\n x = x.mean(dim=-1).mean(dim=-1)\n init_pose = self.init_pose.expand(batch_size, -1)\n init_shape = self.init_shape.expand(batch_size, -1)\n init_cam = self.init_cam.expand(batch_size, -1)\n pred_pose = init_pose\n pred_shape = init_shape\n pred_cam = init_cam\n for _ in range(self.n_iter):\n xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)\n xc = self.fc1(xc)\n xc = self.drop1(xc)\n xc = self.fc2(xc)\n xc = self.drop2(xc)\n pred_pose = self.decpose(xc) + pred_pose\n pred_shape = self.decshape(xc) + pred_shape\n pred_cam = self.deccam(xc) + pred_cam\n pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)\n out = pred_rotmat, pred_shape, pred_cam\n return out", "def forward(self, input):\n\n # transform input into matrix of vectors [Batch, 784]\n input = input.view(-1, 784).to(self.device)\n\n # full pass\n mean, std = self.encoder(input)\n z = self.reparameterize(mean, std)\n output = self.decoder(z)\n\n # calculate each part of the loss\n reg_loss = self.reg_loss(mean, std)\n recon_loss = self.recon_loss(input, output)\n\n # avarage over batch\n average_negative_elbo = torch.mean(reg_loss + recon_loss, dim=0)\n return average_negative_elbo" ]
[ "0.83496284", "0.83496284", "0.76277405", "0.7380476", "0.6854552", "0.6846691", "0.68106997", "0.67724216", "0.67681694", "0.673687", "0.67066", "0.665859", "0.66289693", "0.65885663", "0.65885663", "0.65885663", "0.6574135", "0.65682745", "0.6561686", "0.6558493", "0.65480936", "0.65447515", "0.6529728", "0.6476651", "0.64668775", "0.64668775", "0.6456558", "0.64467347", "0.64467347", "0.64342207", "0.64275366", "0.6420791", "0.6411026", "0.6398283", "0.63957906", "0.6386436", "0.63714266", "0.6366538", "0.6360533", "0.63602084", "0.63590336", "0.63407326", "0.6328112", "0.63196605", "0.631507", "0.6309354", "0.6307667", "0.6303298", "0.62887824", "0.6287309", "0.6283755", "0.6278196", "0.6252965", "0.62488043", "0.6246375", "0.6239652", "0.620057", "0.6198485", "0.61898494", "0.6189029", "0.6183984", "0.6183154", "0.61824185", "0.6165785", "0.6165118", "0.6163998", "0.61560154", "0.61548734", "0.61517566", "0.6151625", "0.61438614", "0.61434305", "0.6133229", "0.61304826", "0.61288786", "0.61272717", "0.61250794", "0.61199653", "0.6112391", "0.61084676", "0.61084676", "0.6102331", "0.6099532", "0.6096481", "0.60928875", "0.6089474", "0.6087183", "0.60837585", "0.6082731", "0.6081557", "0.60812616", "0.6080372", "0.6076659", "0.6074957", "0.6071355", "0.6051624", "0.60404694", "0.60372853", "0.6030695", "0.6028458" ]
0.6389994
35
Save the training checkpoint to the disk.
def save_checkpoint(self):
    if not self.save_ckpt:
        return
    lookup = None
    is_best = False
    checkpoint = self.create_checkpoint()
    # save best only or not?
    if self.save_best_only:
        if self.valid_dataloader:
            for item in [self.valid_metric_meters, self.valid_loss_meters]:
                if self.primary_indicator in item:
                    lookup = item
        else:
            for item in [self.train_metric_meters, self.train_loss_meters]:
                if self.primary_indicator in item:
                    lookup = item
        if lookup:
            value = lookup[self.primary_indicator].avg
            if self.best_mode == 'min':
                if value < self.best_indicator:
                    self.best_indicator = value
                    is_best = True
            else:
                if value > self.best_indicator:
                    self.best_indicator = value
                    is_best = True
    # TODO: better naming convention
    if self.valid_dataloader:
        metric_string = '-'.join([
            f'{metric}-[{self.valid_metric_meters[metric].avg:.5f}]'
            for metric in self.valid_metric_meters
        ])
        loss_string = '-'.join([
            f'{loss}-[{self.valid_loss_meters[loss].avg:.5f}]'
            for loss in self.valid_loss_meters
        ])
    else:
        metric_string = '-'.join([
            f'{metric}-[{self.train_metric_meters[metric].avg:.5f}]'
            for metric in self.train_metric_meters
        ])
        loss_string = '-'.join([
            f'{loss}-[{self.train_loss_meters[loss].avg:.5f}]'
            for loss in self.train_loss_meters
        ])
    # TODO: use config for paths
    # make subdir
    folder = Path(self.save_path, str(self.fold_idx))
    folder.mkdir(parents=True, exist_ok=True)
    if not self.save_best_only or (self.save_best_only and is_best):
        torch.save(checkpoint, f'{folder}/ep-[{self.epoch}]-iter-[{self.iter}]-{loss_string}-{metric_string}.pth')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_checkpoint(self):\n \n if not os.path.isdir(self.path + '/checkpoint/'):\n os.makedirs(self.path + '/checkpoint/')\n\n if self.saver == None:\n with self.graph.as_default():\n self.saver = tf.train.Saver(tf.global_variables())\n\n self.saver.save(self.session, self.path + '/checkpoint/model.ckpt')", "def save(self, checkpoint_path: str):\r\n raise NotImplementedError", "def save(self):\n\n if self.ckpt_manager is not None:\n save_path = self.ckpt_manager.save()\n print(\"Saved checkpoint at: {}\".format(save_path))\n else:\n print(\"There is no checkpoint manager supplied for saving the \"\n \"network weights, optimizer, or other trackables.\")\n print(\"Therefore these will not be saved and the training will \"\n \"start from default values in the future.\")\n print(\"Consider using a checkpoint manager to save the network \"\n \"weights and optimizer.\")", "def save_checkpoint(self, checkpoint_path='checkpoint.pth'):\n # Move the model back to the cpu so it can be loaded onto machines\n # without gpu's as well.\n self.model.to('cpu')\n\n checkpoint = {\n 'model_architecture': self.model_architecture,\n 'input_size': self.input_size,\n 'output_size': self.output_size,\n 'hidden_layers': self.hidden_layers,\n 'learn_rate': self.learn_rate,\n 'drop_p': self.drop_p,\n 'class_to_idx': self.model.class_to_idx,\n 'current_epoch': self.model.current_epoch,\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'model_state_dict': self.model.state_dict()\n }\n torch.save(checkpoint, checkpoint_path)", "def save_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)", "def save_checkpoint(self, checkpoint_info):\n torch.save(checkpoint_info, os.path.join(self.checkpoint_path, self.checkpoint_file))", "def checkpoint(self):\n save()", "def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)", "def save_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n os.makedirs(checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n torch.save(self.model.state_dict(), path)", "def save_checkpoint(self, filename='checkpoint.pth'):\n torch.save(self.state_dict(), filename)", "def save_checkpoint(self, filename=None):\n filename = os.path.join(self.args.checkpoint_dir, filename)\n state = {\n 'epoch': self.current_epoch + 1,\n 'iteration': self.current_iter,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_MIou':self.best_MIou\n }\n torch.save(state, filename)", "def save_session(self):\n\n # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(self.FLAGS.model_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n current_step = tf.train.global_step(self.session, self.global_step)\n path = self.saver.save(self.session, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))", "def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"`epoch` != save_dict's `start_epoch`\"\n assert iteration == save_dict['iteration'], \"`iteration` != save_dict's `start_iteration`\"\n if os.path.isfile(path):\n print(\"Overwrite checkpoint in epoch %d, iteration %d :exclamation:\" % (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"Fail to save checkpoint\")\n \n print(\"Checkpoint %s saved :heavy_check_mark:\" % (str(epoch) + '.' + str(iteration) + '.ckpt'))", "def save_checkpoint(self, epoch: int) -> Path:\n logging.getLogger().disabled = True\n model_state_dict = self.model.module.state_dict() \\\n if isinstance(self.model, torch.nn.DataParallel) else self.model.state_dict()\n checkpoint_file_path = self.config.get_path_to_checkpoint(epoch)\n checkpoint_file_path.parent.mkdir(exist_ok=True, parents=True)\n info_to_store = {\n ModelAndInfo.EPOCH_KEY: epoch,\n ModelAndInfo.MODEL_STATE_DICT_KEY: model_state_dict,\n ModelAndInfo.OPTIMIZER_STATE_DICT_KEY: self.optimizer.state_dict()\n }\n if self.config.compute_mean_teacher_model:\n assert self.mean_teacher_model is not None # for mypy, getter has this built in\n mean_teacher_model_state_dict = self.mean_teacher_model.module.state_dict() \\\n if isinstance(self.mean_teacher_model, torch.nn.DataParallel) \\\n else self.mean_teacher_model.state_dict()\n info_to_store[ModelAndInfo.MEAN_TEACHER_STATE_DICT_KEY] = mean_teacher_model_state_dict\n\n torch.save(info_to_store, checkpoint_file_path)\n logging.getLogger().disabled = False\n logging.info(f\"Saved model checkpoint for epoch {epoch} to {checkpoint_file_path}\")\n return checkpoint_file_path", "def save_checkpoint(model, save_path):\n torch.save(model.state_dict(), save_path)", "def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):\n\n checkpoint_path = os.path.join(model_dir, checkpoint_prefix)\n saved_path = checkpoint.save(checkpoint_path)\n logging.info('Saving model as TF checkpoint: %s', saved_path)\n return", "def _save_checkpoint(self, epoch, is_best=False):\n arch = type(self.model).__name__\n state = {\n 'arch': arch,\n 'epoch': epoch,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'config': self.config\n }\n filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))\n torch.save(state, filename)\n self.logger.info(\"Saving checkpoint: {} ...\".format(filename))\n if is_best:\n best_path = str(self.checkpoint_dir / 'model_best.pth')\n torch.save(state, best_path)\n self.logger.info(\"Saving current best: model_best.pth ...\")", "def saveCheckpoint(acc, epoch, model, train_hist):\r\n print('Saving..')\r\n state = {\r\n 'model': model,\r\n 'acc': acc,\r\n 'epoch': epoch,\r\n 'rng_state': torch.get_rng_state(),\r\n 'train_hist': train_hist\r\n }\r\n if not os.path.isdir('checkpoint'): # save to 
checkpoint directory\r\n os.mkdir('checkpoint')\r\n torch.save(state, './checkpoint/ckpt' + '_' + str(epoch+1))", "def save(self):\n\n pattern = '{}_{}_{}ep.pt' if self.checkpoint_filename_pattern is None else self.checkpoint_filename_pattern\n filename = pattern.format('sherlock1', time.strftime(\"%Y-%m-%d_%H-%M-%S\"),\n self.monitors['loss_train'].num_epochs)\n full_filename = self.full_path(filename)\n c = {\n 'state_dict': self.net.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'monitors': self.monitors,\n 'parent': self.parent,\n 'args': vars(args) # convert args to dict\n }\n torch.save(c, full_filename)\n if not args.tuning and args.delete and self.last_checkpoint is not None:\n os.remove(self.last_checkpoint)\n self.last_checkpoint = full_filename\n return filename", "def save_checkpoint(state, filename):\n torch.save(state, filename) # save checkpoint", "def save_model(self, checkpoint_path, epoch):\n self.saver.save(self.sess, checkpoint_path, global_step = epoch)", "def save_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n os.makedirs(model_dir, exist_ok=True)\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n model_dict = {'net_state_dict': self.net.state_dict(),\n 'use_cuda': self.use_cuda}\n\n print(\"Saving model to {}\".format(model_file))\n torch.save(model_dict, model_file)", "def save_checkpoint(self, name=''):\n self.checkpoint_path.mkdir(exist_ok=True)\n if name:\n path = self.checkpoint_path / f'{name}_{self.epoch}.tar'\n else:\n path = self.checkpoint_path / f'{self.epoch}.tar'\n torch.save(self.get_state(), path)", "def checkpoint_save(self, epoch, model, label=None, checkpoint=None, path=\"\"):\n\n if label is None:\n label = f\"checkpoint-{epoch}\"\n else:\n label = f\"{label}-checkpoint-{epoch}\"\n\n if checkpoint is None:\n pass\n elif checkpoint == -1:\n Potentials.save(model=model, label=label, path=path)\n elif epoch % checkpoint == 0:\n Potentials.save(model=model, label=label, path=path)", "def save_checkpoint(self, model):\n # print(f\"save model {self.save_model_path}\")\n torch.save(model.state_dict(), self.save_model_path)", "def save_checkpoint(filename, model, state=None):\n if not state:\n torch.save(model.state_dict(), os.path.join('checkpoints/', filename))\n else:\n _state = {\n 'epoch': state['epoch'],\n 'state_dict': state['state_dict'].state_dict(),\n 'optimizer': state['optimizer'].state_dict()\n }\n\n torch.save(_state, os.path.join('checkpoints/', filename))", "def save_checkpoint(state, is_best, epoch, args, filename='checkpoint.pth'):\n if not os.path.exists(args.save_folder):\n os.makedirs(args.save_folder)\n filename = args.save_folder + str(epoch) + '_' + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, args.save_folder + 'model_best.pth')", "def write_checkpoint(self, session):\n base_save_path = self.params.cp_save_dir+self.params.model_name+\"_v\"+self.params.version\n full_save_path = self.full_saver.save(session,\n save_path=base_save_path,\n global_step=self.global_step,\n latest_filename=self.params.cp_latest_filename)\n self.logger.log_info(\"Full model saved in file %s\"%full_save_path)\n return base_save_path", "def train_and_save(self, checkpoint_dir):\n dataset = self._read_dataset(self._train_dataset_path)\n features, labels = self.get_features_and_labels(dataset)\n self._model.partial_fit(features, labels, classes=self._classes)\n checkpoint_path = self._save_model(checkpoint_dir)\n return 
checkpoint_path", "def checkpoint(self):\n self.logger.info('Checkpointing Sampler')\n with open(self.resume_file, \"wb\") as f:\n pickle.dump(self, f)", "def save_ckpt(self, name=None):\r\n if name is None:\r\n save_path = os.path.join(self.model_dir, \"ckpt_epoch{}.pth\".format(self.clock.epoch))\r\n print(\"Checkpoint saved at {}\".format(save_path))\r\n else:\r\n save_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if isinstance(self.net, nn.DataParallel):\r\n torch.save({\r\n 'clock': self.clock.make_checkpoint(),\r\n 'model_state_dict': self.net.module.cpu().state_dict(),\r\n 'optimizer_state_dict': self.optimizer.state_dict(),\r\n 'scheduler_state_dict': self.scheduler.state_dict(),\r\n }, save_path)\r\n else:\r\n torch.save({\r\n 'clock': self.clock.make_checkpoint(),\r\n 'model_state_dict': self.net.cpu().state_dict(),\r\n 'optimizer_state_dict': self.optimizer.state_dict(),\r\n 'scheduler_state_dict': self.scheduler.state_dict(),\r\n }, save_path)\r\n self.net.cuda()", "def save_checkpoint(self, name):\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'best_metrics': self.scores\n }\n\n logger.warning(\"Saving model parameters ...\")\n data['model'] = self.encoder.model.state_dict()\n data['classifier'] = self.proj\n data['dico_id2word'] = self.data['dico'].id2word\n data['dico_word2id'] = self.data['dico'].word2id\n data['dico_counts'] = self.data['dico'].counts\n # print(self.encoder.pretrain_params)\n data['params'] = self.encoder.pretrain_params.update({k: v for k, v in self.params.__dict__.items()})\n\n torch.save(data, path)", "def save_checkpoint(self, checkpoint: str) -> str:\n\n # Some model might need to aggregate variables during checkpointing\n # which requires both the chief and workers to participate in the\n # allreduce communication protocol.\n # So we need to call get_state on every remote workers, otherwise\n # it might get stuck\n state_refs = [w.get_state.remote() for w in self.remote_workers]\n\n state = ray.get(state_refs[0])\n\n with open(checkpoint, \"wb\") as f:\n SafePickle.dump(state, f)\n\n return checkpoint", "def save_checkpoint(state, filename):\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint", "def checkpoint(self, epoch, losses, path):\n dct = {'epoch': epoch, \n 'losses': losses, \n 'model_state_dict': self.TrajectoryAutoencoder.state_dict()}\n torch.save(dct, path)", "def save(self, checkpoint_dir, step):\n model_name = \"CNN.model\"\n model_dir = \"%s\" % (\"cnn\")\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n \n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n \n self.saver.save(self.sess,\n os.path.join(checkpoint_dir, model_name),\n global_step=step)", "def save_checkpoint(self, accuracy = None):\n state_dict = {\n 'epoch': self.epoch + 1,\n 'state_dict': self.model.state_dict(),\n 'optim_dict': self.optimizer.state_dict()\n }\n torch.save(state_dict,\n os.path.join(self.checkpoints_path,\n \"last.pth\".format(accuracy)))\n if accuracy is not None and accuracy > self.best_accuracy:\n if self.best_accuracy > 0:\n os.remove(\n os.path.join(\n self.checkpoints_path,\n \"best_{acc:.4f}.pth\".format(acc=self.best_accuracy)\n )\n )\n self.best_accuracy = accuracy\n torch.save(state_dict,\n os.path.join(self.checkpoints_path,\n \"best_{acc:.4f}.pth\".format(acc=accuracy)))\n self.best_accuracy = accuracy", "def save_checkpoint(filename, epoch, model, 
optimizer=None, best_score=0):\n torch.save({\n 'model' : model.state_dict(),\n 'optim' : optimizer.state_dict() if optimizer is not None else None,\n 'epoch' : epoch,\n 'best_score' : best_score\n }, filename)", "def save_checkpoint(state, is_best, checkpoint_dir, logger=None):\n\n def log_info(message):\n if logger is not None:\n logger.info(message)\n\n if not os.path.exists(checkpoint_dir):\n log_info(\n f\"Checkpoint directory does not exists. Creating {checkpoint_dir}\")\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, f'epoch{state[\"epoch\"]}_checkpoint.pytorch')\n log_info(f\"Saving last checkpoint to '{last_file_path}'\")\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n log_info(f\"Saving best checkpoint to '{best_file_path}'\")\n shutil.copyfile(last_file_path, best_file_path)", "def save_checkpoint(state, filename='checkpoint.pth.tar'):\n torch.save(state, filename)", "def save_checkpoint(state, is_best, checkpoint_dir):\n\n if not os.path.exists(checkpoint_dir):\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n shutil.copyfile(last_file_path, best_file_path)", "def save_checkpoint(state, is_best, filename='checkpoint/chpt.tar'):\n if is_best:\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")", "def save_checkpoint(self, model, optimizers):\n\n def _save(path, model, optimizers):\n if not os.path.exists(path):\n os.makedirs(path)\n # TODO: put everything on CPU first\n torch.save(model.state_dict(), os.path.join(path, 'model.ckpt'))\n torch.save(tuple([optimizer.opt.state_dict() for optimizer in optimizers]),\n os.path.join(path, 'opt.ckpt'))\n\n if (self.epoch % self._save_iter) == 0:\n # we're at a save iteration\n ckpt_path = os.path.join(self.log_path, 'checkpoints', str(self.epoch))\n _save(ckpt_path, model, optimizers)\n\n if self._best_epoch:\n # overwrite the best model\n ckpt_path = os.path.join(self.log_path, 'checkpoints', 'best')\n _save(ckpt_path, model, optimizers)\n self._best_epoch = False", "def _save_model(self, checkpoint_dir):\n # Check whether the specified path exists or not\n isExist = os.path.exists(checkpoint_dir)\n\n if not isExist:\n # Create a new directory because it does not exist\n os.makedirs(checkpoint_dir)\n\n filename = self._get_checkpoint_name()\n path = checkpoint_dir + filename\n\n # Serialize the model checkpoint in to a Python Pickle file\n with open(path, 'wb') as f:\n pickle.dump(self._model, f)\n return path", "def save_checkpoint(self, filename, extra_state):\n if distributed_utils.is_master(self.args): # only save one checkpoint\n utils.save_state(\n filename, self.args, self.get_model(), self.criterion, self.optimizer,\n self.lr_scheduler, self._num_updates, self._optim_history, extra_state,\n )", "def write_checkpoint(self, checkpoint_id, best=False):\n assert self.output_dir is not None\n checkpoint_dir = os.path.join(self.output_dir, 'checkpoints')\n fname = self.get_model_fname(self.model)\n checkpoint_file = ''\n if best:\n checkpoint_file = 'model_checkpoint_%s.best.pth.tar' % ( fname )\n else:\n checkpoint_file = 'model_checkpoint_%s_%03i.pth.tar' % ( fname, checkpoint_id )\n os.makedirs(checkpoint_dir, exist_ok=True)\n 
torch.save(dict(model=self.model.state_dict()),\n os.path.join(checkpoint_dir, checkpoint_file))", "def save_checkpoint(self, session: tf.Session, global_step: int):\n _delete_old_checkpoints(str(self.info.checkpoint_path))\n _save_checkpoint(session, str(self.info.checkpoint_path),\n str(self.info.model_file), global_step)", "def checkpoint(self, epoch: int):\n if self.exp.scheduler_stepper is not None:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"scheduler_state_dict\": self.exp.scheduler_stepper.scheduler.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )\n else:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )", "def save_checkpoint(args,state, is_best, filename=\"checkpoint.pth.tar\"):\n directory = \"runs/%s-net/\" % (args.name)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\n epoch = state['epoch']\n\n filename = directory + filename\n torch.save(state, filename)\n\n if is_best:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_best.pth.tar\")\n\n if epoch==0 or epoch==2:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_epoch_%d.pth.tar\" % epoch )", "def save(self):\n\n self.saver.save(self.sess, self.path + '/tensorflow-model', global_step=self.counter.count)", "def save_checkpoint(state, is_best, checkpoint_dir, logger=None):\n\n def log_info(message):\n if logger is not None:\n logger.info(message)\n\n if not os.path.exists(checkpoint_dir):\n log_info(\n \"Checkpoint directory does not exists. Creatding {}\".format(checkpoint_dir))\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')\n log_info(\"Saving last checkpoint\")\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n log_info(\"Saving best checkpoint\")\n shutil.copyfile(last_file_path, best_file_path)", "def save_checkpoint(model, is_best, filename='./model/checkpoint.pth.tar'):\n if is_best:\n torch.save(model.state_dict(), filename) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")", "def save(self):\n try:\n torch.save(self.model.state_dict(), os.path.join(self.save_path, \"save_point.pth\"))\n except:\n print(\"Unable to save the model\")", "def back_up(self, epoch):\n K.set_value(self._ckpt_saved_epoch, epoch)\n # Save the model plus CKPT_SAVED_EPOCH variable.\n if self.write_checkpoint_manager.save():\n distributed_file_utils.remove_temp_dirpath(\n self.write_checkpoint_manager.directory,\n None) #self._model.distribute_strategy)", "def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):\n data = dict()\n data[\"inst\"] = \"save\"\n data[\"folder\"] = folder\n data[\"filename\"] = filename\n\n q_idx, data_id = self.put(data, q_idx=0) # Send instruction to first nnet\n self.get(q_idx, data_id) # Blocks here\n\n # Done", "def save(self, model_dir, step, epoch, is_best=False):\n if model_dir is None:\n return\n save_checkpoint(self._model, self._optimizer, step, epoch, model_dir,\n keep_every_n=self._keep_every_n, is_best=is_best)", "def save(self, path=\"./trained_model.checkpoint\"):\n torch.save({\"state_dict\":self.working_q.state_dict}, path)", "def save_states(self, checkpoint):\n raise NotImplementedError()", "def save(self, sess):\n ckpt_path = 
os.path.join(self.model.ckpt_dir, 'model')\n if not os.path.exists(self.model.ckpt_dir):\n os.makedirs(self.model.ckpt_dir)\n self.saver.save(sess, ckpt_path, global_step=self.gstep)", "def save(self, save_path: str) -> None:\n self._check_initialization()\n\n # save config\n self.save_config(save_path)\n\n # save model weights\n self.model.save(save_path)\n\n # save training set metadata\n training_set_metadata_path = os.path.join(save_path, TRAIN_SET_METADATA_FILE_NAME)\n save_json(training_set_metadata_path, self.training_set_metadata)", "def save_checkpoint(state, model_name=None):\n \n if not model_name: model_name = f\"model_date_{date_time_str}.pth\"\n torch.save(state, osj(out_path, model_name))", "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"checkoutpoint/%s/\" % args.name\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'checkoutpoint/%s/' % args.name + 'model_best.pth.tar')", "def save_checkpoint(model, state, is_best, checkpoint):\n state_filepath = os.path.join(checkpoint, 'last.pth.tar')\n model_filepath = os.path.join(checkpoint, 'last_model.pth')\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.mkdir(checkpoint)\n else:\n print(\"Checkpoint Directory exists! \")\n torch.save(state, state_filepath)\n torch.save(model, model_filepath)\n if is_best:\n shutil.copyfile(state_filepath, os.path.join(checkpoint, 'best.pth.tar'))\n shutil.copyfile(model_filepath, os.path.join(checkpoint, 'best_model.pth'))", "def _save_state(self, saver, session, data, checkpts_path):\n # Save variable state\n if checkpts_path:\n logging.info('Saving cotrain checkpoint at %s.', checkpts_path)\n saver.save(session, checkpts_path, write_meta_graph=False)\n\n # Save dataset state.\n if self.data_dir:\n logging.info('Saving self-labeled dataset backup.')\n data.save_state_to_file(self.data_dir)", "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/\" % (args.name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'runs/%s/' % (args.name) + 'model_best.pth.tar')", "def save_model_checkpoint(model, optimizer, global_step, epoch_info, file_name):\n output = {\n \"model\" : model.state_dict(),\n \"optimizer\" : optimizer.state_dict(),\n \"global_step\" : global_step + 1,\n \"epoch_info\" : epoch_info\n }\n torch.save(output, file_name)", "def save_checkpoint(model, epoch, checkpoint_dir, stats):\n state = {\n \"epoch\": epoch,\n \"state_dict\": model.state_dict(),\n \"stats\": stats,\n }\n\n filename = os.path.join(checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(epoch))\n torch.save(state, filename)", "def write_checkpoint(self):\n self.file_checkpoint_data = open(self.path_checkpoint, \"a+\")\n array_to_write = [str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n self.file_checkpoint_data.write(','.join(array_to_write) + \"\\n\")\n self.file_checkpoint_data.flush()", "def save_checkpoint(dir, state, is_best, filename='checkpoint.pth.tar'):\n directory = \"%s/\" % (dir)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, '%s/' %\n (dir) + 'model_best.pth.tar')", "def 
save(self):\n torch.save(self.state_dict(), self.checkpoint_path)\n with open(self.config_path, 'w') as f:\n print(self, file=f)", "def _save(self, step):\n\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path,global_step=step)", "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/\"%(args.name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'runs/%s/'%(args.name) + 'model_best.pth.tar')", "def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)", "def save(self, checkpoint_dir, step):\n\n model_name = \"CSGAN.model\"\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n self.saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step)", "def save(self, save_path):\n self.graph.saver.save(self.sess, save_path)", "def save(self, save_path):\n self.graph.saver.save(self.sess, save_path)", "def _save(self, tmp_checkpoint_dir):\n checkpoint_path = os.path.join(tmp_checkpoint_dir, \"model_weights\")\n self.model.save_weights(checkpoint_path, save_format=\"tf\")\n return tmp_checkpoint_dir", "def save(self, fname, io=None):\n ckpt_path = self.manager.save()\n logging.info(f'Saved to {ckpt_path}')\n\n print_summary(self.model)\n\n if io is not None:\n io._upload_dir_to_bucket(self.save_path, self.save_path, ['ckpt', 'checkpoint'])", "def save_checkpoint(model, optimizer=None, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n\n print_debug('Saving checkpoint: ' + path)\n\n model = model.module if type(model) is torch.nn.DataParallel else model\n\n checkpoint = {\n 'model_state_dict': model.state_dict()\n }\n\n if optimizer is not None:\n checkpoint['optimizer_state_dict'] = optimizer.state_dict()\n\n torch.save(checkpoint, path)", "def save_checkpoint(state, is_best, file_path, file_name='checkpoint.pth.tar'):\n\n save_path = file_path + '/' + file_name\n torch.save(state, save_path)\n if is_best:\n shutil.copyfile(save_path, file_path + '/model_best.pth.tar')", "def save_checkpoint(self, path: str, **kwargs):\n if self.distributed:\n encoder = self.net_q.module.encoder\n head = self.net_q.module.head\n else:\n encoder = self.net_q.encoder\n head = self.net_q.head\n\n ckpt = {\n 'encoder': encoder.state_dict(),\n 'head': head.state_dict(),\n 'net_ps': self.net_ps.state_dict(),\n 'net_k': self.net_k.state_dict(),\n 'queue': self.queue.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'scheduler': self.scheduler.state_dict(),\n }\n if kwargs:\n ckpt.update(kwargs)\n torch.save(ckpt, path)", "def save(self, sess, save_path=\"./log/model.ckpt\", verbose=True):\n if(verbose): print(\"Saving model in: \" + str(save_path))\n save_path = self.tf_saver.save(sess, save_path)\n if(verbose): print(\"Done!\")", "def save_checkpoint(epoch, outdir, model, mapper, optimizer, criterion,\n filename='checkpoint.OWE.pth.tar'):\n filename = 
outdir / filename\n logger.info(\"Saving checkpoint to {}.\".format(filename))\n torch.save({'epoch': epoch,\n 'model': model.state_dict(),\n 'mapper': mapper.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, str(filename))\n if max(criterion) == criterion[-1]:\n best_name = str(outdir / 'best_checkpoint.OWE.pth.tar')\n shutil.copyfile(str(filename), best_name)\n logger.info(\"Saved best checkpoint to {}.\".format(best_name))", "def save_checkpoint(workdir: str,\n train_state: TrainState,\n max_to_keep: int = 3,\n overwrite: bool = False):\n if jax.process_index() == 0:\n # Get train state from the first replica.\n checkpoint_state = jax.device_get(jax_utils.unreplicate(train_state))\n checkpoints.save_checkpoint(\n workdir,\n checkpoint_state,\n int(checkpoint_state.global_step),\n overwrite=overwrite,\n keep=max_to_keep)", "def save_checkpoint(state, is_best, filename=\"checkpoint.pth.tar\"):\n # only save from rank 0 process to avoid race condition\n rank = comm.get().get_rank()\n if rank == 0:\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, \"model_best.pth.tar\")", "def save_checkpoint(state: dict, is_best: bool, filename: str = 'checkpoint.pth.tar', args: Namespace = None):\n directory = f\"runs/{args.name}/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, f'runs/{args.name}/model_best.pth.tar')", "def save_checkpoint(state, is_best, checkpoint):\n filepath = os.path.join(checkpoint, 'last.pth.tar')\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.mkdir(checkpoint)\n torch.save(state, filepath)\n # 如果是最好的checkpoint则以best为文件名保存\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))", "def save(self) -> None:\n self.saver.save_model_and_weights(self.model)\n self.saver.save_data_shuffle_indices(\n self.data.eval_shuffler.ds_inds\n )\n self.saver.save_input_scaler(self.data.x.scaler)", "def save_checkpoint(self, filename, extra_state):\n self.call_async(0, '_async_save_checkpoint', filename=filename, extra_state=extra_state).gen()", "def _save(trainer, checkpoint_dir, state_dict_key_name, world_rank=None):\n\n # save current model parameters as a checkpoint\n makedir(checkpoint_dir)\n checkpoint_file_name = 'checkpoint{}.ortcp'.format('' if world_rank is None else str(world_rank))\n trainer.save_checkpoint(os.path.join(checkpoint_dir, checkpoint_file_name))\n state_dict = trainer.state_dict()\n with open(os.path.join(checkpoint_dir, state_dict_key_name+'.pkl'), \"wb\") as f:\n pickle.dump({state_dict_key_name : state_dict}, f)", "def save(self, model, ema_model, optimizer, epoch, step, best_wer,\n is_best=False):\n rank = 0\n if dist.is_initialized():\n dist.barrier()\n rank = dist.get_rank()\n\n if rank != 0:\n return\n\n # Checkpoint already saved\n if not is_best and epoch in self.tracked:\n return\n\n unwrap_ddp = lambda model: getattr(model, 'module', model)\n state = {\n 'epoch': epoch,\n 'step': step,\n 'best_wer': best_wer,\n 'state_dict': unwrap_ddp(model).state_dict(),\n 'ema_state_dict': unwrap_ddp(ema_model).state_dict() if ema_model is not None else None,\n 'optimizer': optimizer.state_dict(),\n 'amp': amp.state_dict() if self.use_amp else None,\n }\n\n if is_best:\n fpath = os.path.join(\n self.save_dir, f\"{self.model_name}_best_checkpoint.pt\")\n else:\n fpath = os.path.join(\n self.save_dir, 
f\"{self.model_name}_epoch{epoch}_checkpoint.pt\")\n\n print_once(f\"Saving {fpath}...\")\n torch.save(state, fpath)\n\n if not is_best:\n # Remove old checkpoints; keep milestones and the last two\n self.tracked[epoch] = fpath\n for epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones):\n try:\n os.remove(self.tracked[epoch])\n except:\n pass\n del self.tracked[epoch]", "def save_checkpoint(self, fname, save_optimizer=True):\n # -- Set the network to the full MultiHead_Module network to save everything in the class not only the current model -- #\n self.network = self.mh_network\n\n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().save_checkpoint(fname, save_optimizer)\n\n # -- Set the flag in already_trained_on -- #\n if not self.already_trained_on[str(self.fold)]['checkpoint_should_exist']:\n # -- Set the flag to True -- #\n self.already_trained_on[str(self.fold)]['checkpoint_should_exist'] = True\n # -- Add the current head keys for restoring (should be in correct order due to OrderedDict type of heads) -- #\n self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'] = list(self.mh_network.heads.keys())\n # -- Add the current active task for restoring -- #\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'] = self.mh_network.active_task\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model", "def save_checkpoint(ckpt_dir, model, optim, scheduler, epoch, global_step):\n states = {\n 'model': model.state_dict(),\n 'optim': optim.state_dict(),\n 'epoch': epoch,\n 'global_step': global_step\n }\n if scheduler is not None:\n states['scheduler'] = scheduler.state_dict()\n ckpt_path = os.path.join(ckpt_dir, '[ep-{:02d}]giter-{}.ckpt'.format(epoch, global_step))\n torch.save(states, ckpt_path)\n\n return ckpt_path", "def save_checkpoint(\n self, file_name: str, extra_state: Optional[Dict] = None\n ) -> None:\n checkpoint = {\n \"state_dict\": self.agent.state_dict(),\n \"config\": self.config,\n }\n if extra_state is not None:\n checkpoint[\"extra_state\"] = extra_state\n\n torch.save(checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name))", "def create_checkpoint(model, save_dir, train_data):\n model.class_to_idx = train_data.class_to_idx\n\n checkpoint = {\n 'model': model.name,\n 'classifier': model.classifier,\n 'class_to_idx': model.class_to_idx,\n 'state_dict': model.state_dict()\n }\n\n if save_dir and isdir(save_dir):\n torch.save(checkpoint, save_dir + 'checkpoint.pth')\n print('checkpoint created')\n else: \n print(\"Directory not found. 
Saving at current directory in checkpoint.pth\")\n torch.save(checkpoint, 'checkpoint.pth')", "def save_checkpoint(self, checkpoint_dir: str) -> None:\n state = self.__getstate__()\n\n # Extract policy states from worker state (Policies get their own\n # checkpoint sub-dirs).\n policy_states = {}\n if \"worker\" in state and \"policy_states\" in state[\"worker\"]:\n policy_states = state[\"worker\"].pop(\"policy_states\", {})\n\n # Add RLlib checkpoint version.\n if self.config._enable_learner_api:\n state[\"checkpoint_version\"] = CHECKPOINT_VERSION_LEARNER\n else:\n state[\"checkpoint_version\"] = CHECKPOINT_VERSION\n\n # Write state (w/o policies) to disk.\n state_file = os.path.join(checkpoint_dir, \"algorithm_state.pkl\")\n with open(state_file, \"wb\") as f:\n pickle.dump(state, f)\n\n # Write rllib_checkpoint.json.\n with open(os.path.join(checkpoint_dir, \"rllib_checkpoint.json\"), \"w\") as f:\n json.dump(\n {\n \"type\": \"Algorithm\",\n \"checkpoint_version\": str(state[\"checkpoint_version\"]),\n \"format\": \"cloudpickle\",\n \"state_file\": state_file,\n \"policy_ids\": list(policy_states.keys()),\n \"ray_version\": ray.__version__,\n \"ray_commit\": ray.__commit__,\n },\n f,\n )\n\n # Write individual policies to disk, each in their own sub-directory.\n for pid, policy_state in policy_states.items():\n # From here on, disallow policyIDs that would not work as directory names.\n validate_policy_id(pid, error=True)\n policy_dir = os.path.join(checkpoint_dir, \"policies\", pid)\n os.makedirs(policy_dir, exist_ok=True)\n policy = self.get_policy(pid)\n policy.export_checkpoint(policy_dir, policy_state=policy_state)\n\n # if we are using the learner API, save the learner group state\n if self.config._enable_learner_api:\n learner_state_dir = os.path.join(checkpoint_dir, \"learner\")\n self.learner_group.save_state(learner_state_dir)", "def save(self, epoch=None, note=None):\n\n checkpoint_encoder = {\n 'type': \"transformer\",\n 'model': self.model.encoder.state_dict(),\n 'epoch': epoch,\n 'settings': self.opt\n }\n\n if checkpoint_encoder['settings'].telegram:\n del checkpoint_encoder['settings'].telegram\n\n checkpoint_decoder = {\n 'type': \"transformer\",\n 'model': self.model.decoder.state_dict(),\n 'generator': self.model.generator.state_dict(),\n 'epoch': epoch,\n 'settings': self.opt\n }\n\n if checkpoint_decoder['settings'].telegram:\n del checkpoint_decoder['settings'].telegram\n\n if not note:\n note = \"\"\n\n # make sure a path is specified prior to saving the files.\n if self.opt.save_model:\n ready_to_save = False\n if self.opt.save_mode == \"all\":\n model_name = \"_\" + str(note)\n ready_to_save = True\n else:\n # assumes self.opt.save_mode = \"best\"\n if self.valid_accs[-1] >= max(self.valid_accs):\n model_name = \"\"\n ready_to_save = True\n if self.opt.verbose:\n print(\n ' - [Info] The checkpoint file has been updated.')\n if ready_to_save:\n encoder_name = \"encoder\" + model_name + \".chkpt\"\n decoder_name = \"decoder\" + model_name + \".chkpt\"\n # setup directory to save this at.\n encoder_filepath = os.path.join(\n self.opt.directory, encoder_name)\n decoder_filepath = os.path.join(\n self.opt.directory, decoder_name)\n torch.save(checkpoint_encoder, encoder_filepath)\n torch.save(checkpoint_decoder, decoder_filepath)\n else:\n if not self.save_trip:\n if self.opt.verbose:\n print(\n \" - [Warning]: the model is not specified to save.\")\n self.save_trip = True", "def save(self, PATH):\n self._saver.save(self._sess, PATH)", "def checkpoint(self, state: 
TrainState): # pragma: no cover\n if self.checkpointing:\n if not have_tf: # Flax checkpointing requires tensorflow\n raise RuntimeError(\n \"Tensorflow not available and it is\" \" required for Flax checkpointing.\"\n )\n checkpoint_save(state, self.workdir)" ]
[ "0.8523187", "0.82452935", "0.8190791", "0.8153993", "0.81345206", "0.812798", "0.8117262", "0.80906636", "0.80737823", "0.79668844", "0.79027486", "0.77434766", "0.77273154", "0.7645351", "0.764001", "0.76386636", "0.7630027", "0.76060045", "0.75488526", "0.75404406", "0.75093985", "0.74962014", "0.7488619", "0.7482667", "0.7479034", "0.74642855", "0.7448441", "0.74333835", "0.7426602", "0.73777074", "0.7376148", "0.7374289", "0.736164", "0.7355449", "0.73272514", "0.7317282", "0.73155004", "0.7306538", "0.7298635", "0.72885674", "0.7287496", "0.7284827", "0.7281605", "0.7276099", "0.72708935", "0.72544295", "0.72528785", "0.72388124", "0.7232795", "0.7219696", "0.7216931", "0.7213238", "0.720291", "0.72000444", "0.71940833", "0.71893257", "0.7171778", "0.7169091", "0.7161212", "0.7153695", "0.7146596", "0.7146418", "0.7132139", "0.71285236", "0.7124893", "0.7122662", "0.71205956", "0.71079516", "0.7106885", "0.7105369", "0.70955884", "0.7093372", "0.70878875", "0.7060963", "0.7050234", "0.70499825", "0.70499825", "0.70439065", "0.7040911", "0.7025099", "0.7023363", "0.6980497", "0.6968714", "0.69588786", "0.69523084", "0.6947928", "0.69430935", "0.69413173", "0.69401157", "0.6921538", "0.6920495", "0.6913551", "0.691285", "0.69088984", "0.69068587", "0.68893576", "0.68851465", "0.68787926", "0.68667996", "0.6856025" ]
0.727966
43
max is 5 per city instead of like 6, 12 or 10 =(round((1 + (0.5*(B41 - 1) / 5))*B41*0.25, 2))*12 so i think we take this formula and change the 5 to 4
def calculate_production_bonus(self, number_of_improvements, max_slots):
    pass
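Since the document above is only a stub, here is a minimal illustrative sketch (not from the original source) of how the formula quoted in the query could be computed. The standalone function name, the default cap of 5, and the reading of B41 as the number of improvements in a city are all assumptions.

def production_bonus(number_of_improvements, cap=5):
    # Assumed reading of the spreadsheet formula:
    # (round((1 + (0.5 * (B41 - 1) / cap)) * B41 * 0.25, 2)) * 12
    multiplier = 1 + 0.5 * (number_of_improvements - 1) / cap
    return round(multiplier * number_of_improvements * 0.25, 2) * 12

# Changing the per-city maximum from 5 to 4, as the query suggests:
print(production_bonus(4, cap=5))  # with the old cap of 5
print(production_bonus(4, cap=4))  # with the proposed cap of 4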
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_score(location_list, grid, shape):", "def get_max(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n maximum = df.max(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n maximum = round(float(maximum), 4)\r\n return maximum", "def biggest_city(g):\n max_size = None\n max_city = None\n \n for key in g.city_dict:\n if(g.city_dict[key].get_population() > max_size):\n max_size = g.city_dict[key].get_population()\n max_city = g.city_dict[key].get_name()\n \n \n \n return max_city, max_size", "def biggest_city(self):\r\n biggest = 0\r\n for code, node in self.vertices.items():\r\n if node.population > biggest:\r\n biggest = node.population\r\n city_code = node.code\r\n name = node.name\r\n return city_code, name, biggest", "def lambda_max(self):\n return const.b_wien / self.temperature", "def find_max_score_location(grid, shape):", "def cmax(self):\n return self[\"cmax\"]", "def get_max_cell_voltage(self): \n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? .*? .*? (.*?) . .*? .*? . . . .*?'\n maxv = float(re.findall(pattern,summary).pop())\n return maxv", "def cmax(self):\n return self['cmax']", "def calculate_ucb_max(self, node):\n pass", "def branching_factor(data, loc):\n\n return 20", "def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def calculate_min_max_tiles(self):", "def maxim(self) -> (int, float('inf')):\n\t\treturn 2", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def compute_max(self):\r\n self.x_max = self.ox + self.dx*self.nx\r\n self.y_max = self.oy + self.dy*self.ny\r\n self.z_max = self.oz + self.dz*self.nz", "def get_max_lb(self):\n max_lb = 0\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"lower_bound\"] > max_lb:\n max_lb = self.arc_info[arc][\"lower_bound\"]\n return max_lb", "def find_largest_5_digit_number(digits):\r\n return max(int(digits[i:i + 5]) for i, v in enumerate(digits))", "def mamajek08_logRpHK_max():\n return -3.8918287373004357", "def get_bilan_conso(self):\n qs = self.get_cerema_cities().aggregate(bilan=Coalesce(Sum(\"naf11art21\"), float(0)))\n return qs[\"bilan\"] / 10000", "def fuel_calc(mass):\n return max((mass / 3) - 2, 0)", "def max_mireds(self):\n return 333", "def mamajek08_logRpHK_Ro_max():\n return mamajek08_Ro_logRpHK(-5.0)", "def get_max_temp(self):\n self.max_temp = self.domain[1] * 2", "def maxi():\r\n st.dataframe(Data.style.highlight_max(axis=0))", "def __calc_max_speed(self, coordinates: List[Tuple[Point, Point]]) -> float:\n size_avg = (self.width + 
self.height) / 2\n return min(len(coordinates) * 12, 30) * min(1, 10 / self.__lane_length) * min(1, 5 / size_avg)", "def calcBlockMaxes(self):\n # restrict to fuel\n for k in self.p.paramDefs.inCategory(\"block-max\").names:\n try:\n maxVal = self.getMaxBlockParam(k.replace(\"max\", \"\"), Flags.FUEL)\n if maxVal != 0.0:\n self.p[k] = maxVal\n except KeyError:\n continue\n\n # add maxes based on pin-level max if it exists, block level max otherwise.\n # may want to use percentBuMax for pin-detailed cases.\n self.p.maxBuF = max(\n (\n a.getMaxParam(\"percentBu\")\n for a in self.getAssemblies(Flags.FEED | Flags.FUEL)\n ),\n default=0.0,\n )\n self.p.maxBuI = max(\n (\n a.getMaxParam(\"percentBu\")\n for a in self.getAssemblies(\n [\n Flags.IGNITER | Flags.FUEL,\n Flags.DRIVER | Flags.FUEL,\n Flags.STARTER | Flags.FUEL,\n ]\n )\n ),\n default=0.0,\n )", "def MINIMUM_BET() -> int:\n return 10", "def findmax(h5file, pcoord_dim, fi, li):\n max_values = []\n for i in range(fi,li+1):\n i = str(i)\n iteration = \"iter_\" + str(numpy.char.zfill(i,8))\n pc = h5file['iterations'][iteration]['pcoord']\n maxv = numpy.max(pc[:,-1,pcoord_dim-1])\n max_values.append(maxv)\n maxmax = numpy.max(max_values)\n nw = numpy.where(max_values>(maxmax-maxmax*0.0001))\n iter_num = str((nw[0]+1)[0])\n \n wheretolook = \"iter_\" + str(numpy.char.zfill(iter_num,8))\n max_iter = h5file['iterations'][wheretolook]['pcoord'][:,-1,pcoord_dim-1]\n segmax = numpy.max(max_iter)\n nw2 = numpy.where(max_iter>(segmax-segmax*0.0001))\n seg_num = (nw2[0])[0]\n print (\"Maximum pcoord value for dimension\",pcoord_dim,\"is:\",segmax) \n print (\"It is segment:\",seg_num,\"of iteration:\",iter_num)", "def max_temp(self):\n return 30", "def maxBw(array, value):\n m = np.max(array)\n c = value\n width = len(array[0])\n height = len(array)\n new_array = np.array(np.zeros((height, width)))\n for row in range(height):\n for col in range(width):\n new_array[row,col] = (array[row,col]/float(m)) * c\n return new_array", "def fMaxGasWaterRatio(Salinity, Temperature, Pressure):\n\tTemp = Temperature # Deg C\n\tPress = Pressure / 145.038 # MPa\n\tSal = Salinity\n\tA = log(0.712 * Press * ((abs(Temp - 76.71)) ** 1.5) + 3676 * (Press ** 0.64)) / log(10)\n\tB = -4 - 7.786 * Sal * (Temp + 17.78) ** -0.306\n\tC = A + B\n\treturn 10**C", "def fuction_call(chest):\n\n for i in chest:\n max_i = maximum(chest,i)\n if max_i >= 2:\n print(\"The maximum size of a set Matyoshka Dolls with outermost doll\",i,\"is\",max_i)", "def fn(x):\n if x == 0: return 0\n if x < 0: return -inf \n return max(fn(x - c) * 10 + i + 1 for i, c in enumerate(cost))", "def max_temp(self):\n return 99", "def get_max_sum4(a):\n return max(get_max_sum2(a), 0)", "def calc_max(data: list) -> float:\n acc = data[0]\n for n in data:\n if n > acc:\n acc = n\n return float(acc)", "def calculateTopCityCulture(self, x, y):\n\t\tiBestCityValue = 0\n\t\tpCurrent = gc.getMap().plot( x, y )\n\t\tif pCurrent.isCity():\n\t\t\tbestCity = pCurrent.getPlotCity()\n\t\t\tfor iPlayerLoop in range(gc.getMAX_PLAYERS()):\n\t\t\t\tapCityList = PyPlayer(iPlayerLoop).getCityList()\n\t\t\t\tfor pCity in apCityList:\n\t\t\t\t\tiTotalCityValue = pCity.GetCy().getCultureTimes100(pCity.getOwner())\n\t\t\t\t\tif iTotalCityValue > iBestCityValue:\n\t\t\t\t\t\tbestCity = pCity\n\t\t\t\t\t\tiBestCityValue = iTotalCityValue\n\t\t\treturn bestCity\n\t\treturn -1", "def getUmidadeArMax(self):\n return str(self.getWeather('umid-max')[:2]) + '%'", "def state_max(self) -> float:\n raise NotImplementedError", "def 
getCityLimitsBoundingBox(city, expandBy=0.0):\n url = \"https://nominatim.openstreetmap.org/search?city={}&format=json&addressdetails=1&limit=1\".format(city)\n r = requests.get(url=url)\n bbox_coords = r.json()[0]['boundingbox']\n top = [float(bbox_coords[3]), float(bbox_coords[1])]\n right = top\n bot = [float(bbox_coords[2]), float(bbox_coords[0])]\n left = bot\n\n # enlarge bounding box in order to include the entire neighborhood of the respectve pilot \n if expandBy > 0.0:\n diff_y = top[1]-bot[1]\n top[1] = top[1]+(diff_y*expandBy)\n bot[1] = bot[1]-(diff_y*expandBy)\n diff_x = left[0]-right[0]\n right[0] = right[0]+(diff_x*expandBy)\n left[0] = left[0]-(diff_x*expandBy)\n return top, right, bot, left", "def bern_max_metric(pipe_diameter, delta_p):\n\n rho = 1000 # density of water kg/m^3\n flow_rate_max = ((math.pi * (pipe_diameter**2)) / 4) * math.sqrt((2 * delta_p) / rho)\n\n return flow_rate_max", "def maximum_population(population_list, location):\n maximum = population_list[1][location]\n state = population_list[0][0]\n for lists in population_list[1:]:\n if lists[location] > maximum:\n maximum = lists[location]\n state = lists[0]\n return maximum,state", "def calculateTopCityPopulation(self, x, y):\t\t\n\t\tiBestCityValue = 0\n\t\tpCurrent = gc.getMap().plot( x, y )\n\t\tif (pCurrent.isCity()):\n\t\t\tbestCity = pCurrent.getPlotCity()\n\t\t\tfor iPlayerLoop in range(gc.getMAX_PLAYERS()):\n\t\t\t\tapCityList = PyPlayer(iPlayerLoop).getCityList()\n\t\t\t\tfor pCity in apCityList:\n\t\t\t\t\tiTotalCityValue = pCity.getPopulation()\n\t\t\t\t\tif (iTotalCityValue > iBestCityValue and not pCity.isBarbarian()):\n\t\t\t\t\t\tbestCity = pCity\n\t\t\t\t\t\tiBestCityValue = iTotalCityValue\n\t\t\treturn bestCity\n\t\treturn -1", "def jmax(self, tl):\n\t return self.JMAX0*exp(self.HAJ/(R*self.TO)*(1. - self.TO/tl))/(1. 
+ exp((self.SVQ*tl - self.HDJ)/(R*tl)))", "def mutual_information_max(self):\n return np.log2(special.comb(self.Nr, self.coding_receptors))", "def calc_max_level(num_point):\n return int(numpy.ceil(numpy.log2(num_point)))", "def get_maxdistance_landmarktoPointcloud(self):\n if len(self.subsetnodes) == 0: # Make sure tat the landmark set is already constructed.\n self.select()\n landmarktopointcloud_dist = self.getLandmark_Witness_matrix()\n self.MaxMindist = np.nanmax(np.nanmin(landmarktopointcloud_dist, axis=0)) # Compute max of the min of each column\n return self.MaxMindist", "def getCitySightings(catalog,city):\n cities=catalog['cities']\n keyset=om.keySet(cities)\n selected_city=om.get(cities,city)['value']\n match=lt.newList(datastructure='ARRAY_LIST')\n max_sightings=0\n max_city=\"\"\n #Para obtener la ciudad con mas cantidad de avistamientos toca recorrer todo el arbol.\n #De esto se encarga el siguiente for-\n for c in lt.iterator(keyset):\n city_sightings=lt.size(om.get(cities, c)['value'])\n if city_sightings>max_sightings:\n max_sightings=city_sightings\n max_city=c\n \n #Añade todo los avistamientos de la ciudad ingresada a una lista.\n for sight in lt.iterator(selected_city):\n lt.addLast(match,sight)\n total=lt.size(match)\n ms.sort(match,compareDateTime)\n \n #Hacer la lista en caso de que halla mas que 6\n if total>6:\n joined=lt.subList(match,1,3)\n last=lt.subList(match,total-3,3)\n for a in lt.iterator(last):\n lt.addLast(joined, a)\n else:\n joined=lt.subList(match,1,total)\n return total, joined, max_sightings, max_city", "def get_maximum_value(dataset):\n d = [int(i) for i in dataset if i.isdigit()]\n op = [o for o in dataset if o in ['*', '-', '+']]\n n = len(d)\n d.insert(0, None)\n op.insert(0, None)\n m = [[0 for x in range(n+1)] for y in range(n+1)]\n M = [[0 for x in range(n+1)] for y in range(n+1)]\n for i in range(1, n+1):\n m[i][i] = d[i]\n M[i][i] = d[i]\n for s in range(1, n):\n for i in range(1, n-s+1):\n j = i + s\n m[i][j], M[i][j] = min_and_max(i, j, op, m, M)\n return M[1][n]", "def maxBelief(landscape, belief, rule):\n max = -1\n x,y = 0,0\n terrain = [0.1,0.3,0.7,0.9]\n if(rule == 1):\n for i in range(0, len(landscape)):\n for j in range(0, len(landscape)):\n if belief[i][j] > max:\n max = belief[i][j]\n x = i\n y = j\n else:\n for i in range(0, len(landscape)):\n for j in range(0, len(landscape)):\n if((belief[i][j] * (1 - terrain[landscape[i][j]])) > max):\n max = (belief[i][j] * (1 - terrain[landscape[i][j]]))\n x = i\n y = j\n return x,y", "def get_radius(city_type):\n if city_type == \"city\":\n return \"10mi\"\n elif city_type == \"census designated place\":\n return \"2mi\"\n elif city_type == \"village\" or city_type == \"town\":\n return \"3mi\"\n elif city_type == \"borough\":\n return \"7mi\"\n elif city_type == \"city (remainder)\":\n return \"4mi\"", "def extract_max_minus_cycle_2_QDischarge(batch,index):\n X= []\n for ind in index:\n cell_no = list(batch.keys())[ind]\n max_minus_2 = max(batch[cell_no]['summary']['QD'][0:100]) - batch[cell_no]['summary']['QD'][1]\n max_minus_2 = log(abs(max_minus_2),10)\n X.append(max_minus_2)\n X = np.reshape(X,(-1,1))\n return X\n pass", "def maCruise(self):\n return .77", "def MaxColumn(I, E, d, ro):\n # http://en.wikipedia.org/wiki/Buckling\n B = 1.86635\n g = 9.81\n h = (9 * B**2 * E*I / (4*ro*g*np.pi * (d/2)**2))**(1/3)\n return h", "def alturamax(gravedad, veli):\r\n #se realiza varias operacione para encontrar la altura maxima \r\n maxima=(veli/2)*(veli/gravedad)\r\n #se regresa el valor de 
maxima\r\n return maxima", "def find_max_guesses():\n print(\"You'll get 5 guesses per problem!\")\n return 5", "def get_max_passes(example_height: int) -> int:\n return (example_height - 5) // 4", "def max_height(i_vy , g , i_h):\n t = i_vy/g\n return float(i_h + (i_vy*t)-(.5*g*math.pow(t,2)))", "def get_max(bij, exploration, bij_bool):\n\n#\tbij[bij_bool] = -sys.maxint - 1\n\n\tm = bij.argmax()\n\tc = np.unravel_index(m, bij.shape)\n\t#c = np.unravel_index(bij.argmax(), bij.shape)\n\n############################## A MODIFIER EVENTUELLEMENT #################\n#\tb = bij[bij_bool]\n#\tm = b.argmax()\n#\tind = np.unravel_index(m, b.shape)\n#\tc = np.where(bij == b[ind])\n#\tc = (c[0][0], c[1][0])\n#\tprint('mMAXx', bij[c])\n\treturn (c)", "def _max_in_bounds(self, max):\n if max >= self.valmax:\n if not self.closedmax:\n return self.val[1]\n max = self.valmax\n\n if max <= self.val[0]:\n max = self.val[0]\n return self._stepped_value(max)", "def maxQ(self,state):\r\n maxA = 0\r\n maxQ = float(\"-inf\")\r\n for aCurr in self.actions:\r\n qCurr = self.Q[(state,aCurr)]\r\n if qCurr > maxQ:\r\n maxA = aCurr\r\n maxQ = qCurr \r\n return(maxQ,maxA)", "def correct_vcmax_for_temperature(self, Vcmax25, Tleaf):\n num = self.Q10_func(Vcmax25, self.Q10_Vcmax, Tleaf)\n den = (1.0 + math.exp(0.3 * (Tleaf - self.Tupper))) * \\\n (1.0 + math.exp(0.3 * (self.Tlower - Tleaf)))\n\n return num / den", "def get_max_praises(self):\n char = self.caller.char_ob\n clout = char.social_clout\n s_rank = char.item_data.social_rank\n return clout + ((8 - s_rank) // 2)", "def max_value(board, max_util, min_util, depth):\r\n \r\n global nodes_generated \r\n global min_prune\r\n global max_prune\r\n global max_depth\r\n \r\n nodes_generated += 1\r\n max_depth = max(max_depth,depth)\r\n \r\n if cutoff_search(board, depth):\r\n return evaluation(board)\r\n v = -1000\r\n moves = legal_moves(board,1)\r\n for move in moves:\r\n temp_board = camelot_board.Camelot(list(board.white),list(board.black))\r\n state = action(temp_board, move, 1)\r\n v = max(v, min_value(state, max_util, min_util, depth + 1))\r\n if v >= min_util:\r\n max_prune += 1\r\n return v\r\n max_util = max(max_util, v)\r\n return v", "def _multiple_values_max(self, maps, threshold):\r\n max_val = np.zeros((maps.shape[0], maps.shape[1]), dtype=np.float)\r\n for i in range(maps.shape[1]):\r\n cmin = np.min(maps[:,i])\r\n cmax = np.max(maps[:,i])\r\n limit = cmax - (cmax - cmin) * threshold[i]\r\n min_mask = maps[:,i] <= limit\r\n max_mask = maps[:,i] > limit\r\n # for an abundance map the delta is around [-1..1],\r\n # but it can be outside this interval, it's something\r\n # to test\r\n # a guard with a -10 value maybe ok.\r\n rmin = min_mask * -10\r\n max_val[:,i] = max_mask * maps[:,i] + rmin\r\n max_vec = np.max(max_val, axis=1)\r\n max_mask = max_vec > -10\r\n argmax = np.argmax(max_val, axis=1)\r\n return (argmax + 1) * max_mask", "def find_max():\n bridges = all_bridges\n bridges = [ b for b in bridges if b != None ]\n return max(bridges)", "def max_value(tree):\n max_utility = float(\"-inf\")\n \n if (is_terminal(tree)):\n return tree\n else:\n #options = []\n for node in tree:\n #options.append(max_value(node))\n max_utility = max(max_utility, min_value(node))\n return max_utility", "def get_max_density(self):\n max_density = str(self.density.index(min(self.density)) + 1)\n print(max_density)\n return max_density", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", 
"def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def find_dist_max(ar_coorx,ar_coory):\n nb_cell=len(ar_coorx)\n max_dist=0.\n for i in range(nb_cell):\n for j in range(nb_cell):\n max_dist=max(max_dist,distance(ar_coorx[i],ar_coory[i],ar_coorx[j],ar_coory[j]))\n return max_dist", "def maxinrow(row,span=2):\n maximum= 0\n offset= span - 1\n for i in range(0,len(row)-offset,1):\n print row[i:i+span]\n ans= product(row[i:i+span])\n maximum = ans if ans > maximum else maximum\n return maximum", "def getMinMax(self,arr):\n minz=arr['zmg']-arr['sigma_pz']*5\n dmin=self.zcat-5*self.sigmacat\n minz[np.where(minz>dmin)]=dmin\n maxz=arr['zmg']+arr['sigma_pz']*5\n dax=self.zcat+5*self.sigmacat\n maxz[np.where(maxz<dmax)]=dmax\n return dmin,dmax", "def nearest_neighbour_heuristic(self, city_count):\n return 1.0 / (500.0 * city_count)", "def max_pw(self, entity):\n return float(entity['pw_bb'][1])", "def minMaxBoucle(liste):\n minimum = float(\"inf\")\n maximum = -float(\"inf\")\n\n for index in range(0, 5):\n liste[index] = int(liste[index])\n if liste[index] > maximum:\n maximum = liste[index]\n if liste[index] < minimum:\n minimum = liste[index]\n return minimum, maximum", "def get_max_interest(self):\n max_int = max(self.table[\"interest\"])\n print(max_int)\n for index in self.table[\"index\"]:\n if self.table[\"interest\"][index] == max_int:\n return index", "def max(x):\n pass", "def maxQ(self,state):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n \r\n for a in self.actions:\r\n q = self.Q(state,a)\r\n #print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)", "def get_max_num_onsets():\r\n \"\"\" based on the numbers above, should equal to 932945... \"\"\"\r\n c1 = len(gen_onset_c1())\r\n c2 = len(gen_onset_c2())\r\n c3 = len(gen_onset_c3_c4())\r\n c4 = len(gen_onset_c3_c4())\r\n temp = c1\r\n temp = temp + ( c1 * c2 )\r\n temp = temp + ( c1 * c3 )\r\n temp = temp + ( c1 * c2 * c3 )\r\n temp = temp + ( c1 * c3 * c4 )\r\n temp = temp + ( c1 * c2 * c3 * c4 )\r\n return temp", "def max_reduce_nb(col, a, *args):\n return np.nanmax(a)", "def Max(data):\n return data.max()", "def get_maxcut_data_model():\n n = 5\n V = np.arange(0, n, 1)\n E = [(0, 1, 3.0), (1, 2, 2.0), (2, 3, 2.0), (3, 4, 3.0), (4, 0, 1.0), (0, 3, 3.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n return G", "def rough_l_max(L):\r\n # TODO: Check if L is sparse or not, and handle the situation accordingly\r\n\r\n l_max = np.linalg.eigvalsh(L.todense()).max()\r\n\r\n\r\n l_max_ub = 1.01 * l_max\r\n return l_max_ub", "def lab10_q3():\n return \"\"\"\n Use list comprehension max(lst_of_qvm, key=lambda qvm : total_revenue(qvm))\n\tThis makes each element of the list go through the key which gives total_revenue for each one. 
Then just get the max in that list\n \"\"\"", "def find_max_bin(self):\n x = self.local['clip']\n midrange = x[int(len(x)*0.2):int(len(x)*.2 + int(len(x)*.5))]\n self.max_bin = max(midrange)", "def get_max_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x800c+i,100)/100 for i in range(4)])", "def compute_optimal_block_maximum(block_counts) -> int:\n q1, q3 = compute_quartiles(block_counts)\n iqr = q3 - q1\n high_threshold = q3 + 1.5 * iqr\n return high_threshold", "def find_max(weather_data):\n if len(weather_data) == 0:\n return()\n\n value = float(weather_data[0])\n position = 0\n\n for index, weather in enumerate(weather_data):\n if float(weather) >= value:\n value= float(weather)\n position = index\n\n return(value, position)", "def get_max_cl(Re, r):\n xf = XFoil()\n if r <= 0.175: \n xf.airfoil = naca6409\n else:\n xf.airfoil = naca2412\n xf.Re = Re\n xf.Re = Re\n xf.max_iter = 200\n xf.n_crit = 9.00\n xf.xtr = [1.00, 1.00]\n xf.M = 0\n a_seq, cl_seq, cd_seq, cm_seq, cp_seq = xf.aseq(10,15,0.1)\n # ignore nan by making it 0\n cl_seq = np.nan_to_num(cl_seq)\n # find the maximum cl \n cl_maxi = np.max(cl_seq)\n # index of the maximum cl\n idx = np.argmax(cl_seq)\n return round(cl_maxi,2),round(a_seq[idx],2), round(cd_seq[idx],2)", "def _single_value_max(self, maps, threshold):\r\n max_vec = np.max(maps, axis=1)\r\n cmin = np.min(max_vec)\r\n cmax = np.max(max_vec)\r\n limit = cmax - (cmax - cmin) * threshold\r\n max_mask = max_vec > limit\r\n argmax = np.argmax(maps, axis=1)\r\n return (argmax + 1) * max_mask", "def get_max(self):\n return self.serie.max()", "def centro_5(tab, jog):\r\n if eh_posicao_livre(tab, 5):\r\n return 5", "def hub_city(g):\n max_flights = None\n hub_cities = []\n for key in g.city_dict:\n if(len(g.city_dict[key].get_flights_in()) > max_flights):\n max_flights = len((g.city_dict[key]).flights_in)\n \n \n for key in g.city_dict:\n if(len(g.city_dict[key].get_flights_in()) == max_flights): \n hub_cities.append(g.city_dict[key].get_name()) \n \n \n return hub_cities", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def main():\n actual = [[75,],\n [95, 64],\n [17, 47, 82],\n [18, 35, 87, 10],\n [20, 4, 82, 47, 65],\n [19, 1, 23, 75, 3, 34],\n [88, 2, 77, 73, 7, 63, 67],\n [99, 65, 4, 28, 6, 16, 70, 92],\n [41, 41, 26, 56, 83, 40, 80, 70, 33],\n [41, 48, 72, 33, 47, 32, 37, 16, 94, 29],\n [53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14],\n [70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57],\n [91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48],\n [63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31],\n [4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23]]\n print find_max_sum(actual)" ]
[ "0.61874086", "0.59167874", "0.58895576", "0.5768458", "0.57580644", "0.5663183", "0.5629974", "0.5617509", "0.557116", "0.55564797", "0.5527265", "0.55069757", "0.55021864", "0.5498278", "0.54592294", "0.5444004", "0.54354906", "0.5419152", "0.5413824", "0.539782", "0.53869736", "0.5385826", "0.5370342", "0.53486556", "0.5343289", "0.53318495", "0.5330176", "0.5286902", "0.5277297", "0.5271573", "0.52480835", "0.5230609", "0.52264357", "0.5216299", "0.5212722", "0.52092093", "0.5201362", "0.5198314", "0.519814", "0.51840687", "0.51815504", "0.51800245", "0.5162866", "0.5162489", "0.5158139", "0.5143993", "0.51425683", "0.51403606", "0.51359504", "0.5133685", "0.5130726", "0.51079875", "0.50988644", "0.5094006", "0.50937855", "0.5090383", "0.50855386", "0.5084499", "0.5080128", "0.50791365", "0.50776565", "0.5072063", "0.5070967", "0.50604147", "0.5059093", "0.5056333", "0.5049876", "0.50460714", "0.5045498", "0.5043003", "0.5043003", "0.5043003", "0.5043003", "0.5043003", "0.5043003", "0.5038136", "0.50349605", "0.5034598", "0.50262684", "0.5023467", "0.50226575", "0.50224", "0.5020368", "0.5020141", "0.50102556", "0.50091195", "0.5007175", "0.5006857", "0.5006519", "0.5005545", "0.50048506", "0.50034076", "0.49907857", "0.49851382", "0.49840322", "0.49837095", "0.49832883", "0.49818528", "0.49740604", "0.49716872", "0.4969433" ]
0.0
-1
Given a query, and an update clause, update all (and only) object returned by query.
def test_update(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, **kwargs):\n expr = self.model.__table__.update().where(self.query).values(**kwargs)\n return self._runquery(expr)", "def update(self, **kwargs):\n self._not_support_combined_queries(\"update\")\n if self.query.is_sliced:\n raise TypeError(\"Cannot update a query once a slice has been taken.\")\n self._for_write = True\n query = self.query.chain(sql.UpdateQuery)\n query.add_update_values(kwargs)\n\n # Inline annotations in order_by(), if possible.\n new_order_by = []\n for col in query.order_by:\n alias = col\n descending = False\n if isinstance(alias, str) and alias.startswith(\"-\"):\n alias = alias.removeprefix(\"-\")\n descending = True\n if annotation := query.annotations.get(alias):\n if getattr(annotation, \"contains_aggregate\", False):\n raise exceptions.FieldError(\n f\"Cannot update when ordering by an aggregate: {annotation}\"\n )\n if descending:\n annotation = annotation.desc()\n new_order_by.append(annotation)\n else:\n new_order_by.append(col)\n query.order_by = tuple(new_order_by)\n\n # Clear any annotations so that they won't be present in subqueries.\n query.annotations = {}\n with transaction.mark_for_rollback_on_error(using=self.db):\n rows = query.get_compiler(self.db).execute_sql(CURSOR)\n self._result_cache = None\n return rows", "def _update_all(self, criteria: Q, *args, **kwargs):\n raise NotImplementedError", "def update(cls, query_filter, query_update):\n if query_update.get('$set') and not query_update.get('$set').get('updated'):\n query_update['$set']['updated'] = datetime.datetime.utcnow()\n\n return mongo_db[cls.__collection__].update_one(\n query_filter,\n query_update\n )", "def _update(self, values):\n if self.query.is_sliced:\n raise TypeError(\"Cannot update a query once a slice has been taken.\")\n query = self.query.chain(sql.UpdateQuery)\n query.add_update_fields(values)\n # Clear any annotations so that they won't be present in subqueries.\n query.annotations = {}\n self._result_cache = None\n return query.get_compiler(self.db).execute_sql(CURSOR)", "def _update_all(self, criteria: Q, *args, **kwargs):\n conn = self._get_session()\n qs = conn.query(self.model_cls).filter(self._build_filters(criteria))\n try:\n values = {}\n if args:\n values = args[\n 0\n ] # `args[0]` is required because `*args` is sent as a tuple\n values.update(kwargs)\n updated_count = qs.update(values)\n except DatabaseError as exc:\n logger.error(f\"Error while updating all: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return updated_count", "def _execute_update(self, updateQuery, updateValues):\n with self as plasticDB:\n cursor = plasticDB.connection.cursor()\n cursor.execute(updateQuery, updateValues)", "def update(self, updates, predicate):\n for row in self.rows:\n if predicate(row):\n for column, new_value in updates.items():\n row[column] = new_value", "def update(\n self,\n *args: Union[dict, Mapping],\n session: Optional[ClientSession] = None\n ):\n self.set_session(session=session)\n return (\n self.UpdateQueryType(\n document_model=self.document_model,\n find_query=self.get_filter_query(),\n )\n .update(*args)\n .set_session(session=self.session)\n )", "def update(self, query_conditions, cols_vals_to_update):\n matched_queries = self.__return_query('query', query_conditions)\n if matched_queries == None:\n raise Exception('Sorry, your query did not match any data.')\n else:\n #Loop through and update each row where the query returned true\n for found_row in matched_queries:\n #Check to make sure all the 
column names given by user match the column names in the table.\n row_id = found_row['row_id']\n self.update_row(row_id, cols_vals_to_update)", "def update(self, updates: dict, dry=False):\n self._updates += (UpdateQueryExpression(updates),)\n self.set_action(\"update\")\n if dry:\n return self\n\n return self.connection.query(self.to_sql(), self._bindings)", "def set_list(self, table, q_filter, update_dict, unset=None, pull=None, push=None, push_list=None, pull_list=None):\n with self.lock:\n updated = 0\n found = 0\n for _, db_item in self._find(table, self._format_filter(q_filter)):\n found += 1\n if self._update(db_item, update_dict, unset=unset, pull=pull, push=push, push_list=push_list,\n pull_list=pull_list):\n updated += 1\n # if not found and fail_on_empty:\n # raise DbException(\"Not found entry with '{}'\".format(q_filter), HTTPStatus.NOT_FOUND)\n return {\"updated\": updated} if found else None", "async def update_many(\n self,\n update_document: Dict[str, Any],\n *,\n filter: Optional[Dict[str, Any]] = DEFAULT_FILTER,\n session: Optional[Any] = DEFAULT_SESSION,\n **kwargs: Any,\n ) -> UpdateResult:\n return await self._database.update_many(\n self.name,\n update_document=update_document,\n filter=filter,\n session=session,\n **kwargs,\n )", "def update(self, query, callback=None, query_args=None):\r\n data = self.db.execute(query, query_args)\r\n return data", "def update(self):\r\n if self.instance is None:\r\n raise CQLEngineException(\"DML Query intance attribute is None\")\r\n assert type(self.instance) == self.model\r\n\r\n statement = UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp)\r\n #get defined fields and their column names\r\n for name, col in self.model._columns.items():\r\n if not col.is_primary_key:\r\n val = getattr(self.instance, name, None)\r\n val_mgr = self.instance._values[name]\r\n\r\n # don't update something that is null\r\n if val is None:\r\n continue\r\n\r\n # don't update something if it hasn't changed\r\n if not val_mgr.changed and not isinstance(col, Counter):\r\n continue\r\n\r\n if isinstance(col, (BaseContainerColumn, Counter)):\r\n # get appropriate clause\r\n if isinstance(col, List): klass = ListUpdateClause\r\n elif isinstance(col, Map): klass = MapUpdateClause\r\n elif isinstance(col, Set): klass = SetUpdateClause\r\n elif isinstance(col, Counter): klass = CounterUpdateClause\r\n else: raise RuntimeError\r\n\r\n # do the stuff\r\n clause = klass(col.db_field_name, val,\r\n previous=val_mgr.previous_value, column=col)\r\n if clause.get_context_size() > 0:\r\n statement.add_assignment_clause(clause)\r\n else:\r\n statement.add_assignment_clause(AssignmentClause(\r\n col.db_field_name,\r\n col.to_database(val)\r\n ))\r\n\r\n if statement.get_context_size() > 0 or self.instance._has_counter:\r\n for name, col in self.model._primary_keys.items():\r\n statement.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(statement)\r\n\r\n self._delete_null_columns()", "def _update_bulk(self, iterable):\n self.cursor.executemany(self.UPDATE, iterable)", "def update(self, **values):\r\n if not values:\r\n return\r\n\r\n nulled_columns = set()\r\n us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, timestamp=self._timestamp)\r\n for name, val in values.items():\r\n col_name, col_op = self._parse_filter_arg(name)\r\n col = self.model._columns.get(col_name)\r\n # check for nonexistant columns\r\n if 
col is None:\r\n raise ValidationError(\"{}.{} has no column named: {}\".format(self.__module__, self.model.__name__, col_name))\r\n # check for primary key update attempts\r\n if col.is_primary_key:\r\n raise ValidationError(\"Cannot apply update to primary key '{}' for {}.{}\".format(col_name, self.__module__, self.model.__name__))\r\n\r\n val = col.validate(val)\r\n if val is None:\r\n nulled_columns.add(col_name)\r\n continue\r\n\r\n # add the update statements\r\n if isinstance(col, Counter):\r\n # TODO: implement counter updates\r\n raise NotImplementedError\r\n elif isinstance(col, (List, Set, Map)):\r\n if isinstance(col, List):\r\n klass = ListUpdateClause\r\n elif isinstance(col, Set):\r\n klass = SetUpdateClause\r\n elif isinstance(col, Map):\r\n klass = MapUpdateClause\r\n else:\r\n raise RuntimeError\r\n us.add_assignment_clause(klass(col_name, col.to_database(val), operation=col_op))\r\n else:\r\n us.add_assignment_clause(AssignmentClause(\r\n col_name, col.to_database(val)))\r\n\r\n if us.assignments:\r\n self._execute(us)\r\n\r\n if nulled_columns:\r\n ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where)\r\n self._execute(ds)", "def update(self, query: str, *args, **kwargs):\n cursor = self._cursor()\n try:\n self._execute(cursor, query, args, kwargs)\n return cursor.rowcount\n finally:\n cursor.close()", "def update_many(self, query: str, args):\n cursor = self._cursor()\n try:\n cursor.execute_many(query, args)\n return cursor.rowcount\n finally:\n cursor.close()", "def set_one(self, table, q_filter, update_dict, fail_on_empty=True, unset=None, pull=None, push=None,\n push_list=None, pull_list=None):\n with self.lock:\n for i, db_item in self._find(table, self._format_filter(q_filter)):\n updated = self._update(db_item, update_dict, unset=unset, pull=pull, push=push, push_list=push_list,\n pull_list=pull_list)\n return {\"updated\": 1 if updated else 0}\n else:\n if fail_on_empty:\n raise DbException(\"Not found entry with _id='{}'\".format(q_filter), HTTPStatus.NOT_FOUND)\n return None", "def update(self, **kw):\n colmap = {}\n for k, v in kw.iteritems():\n colmap[self.__attrmap__[k]] = v\n\n yield Update(\n colmap,\n Where=self._primaryKeyComparison(self._primaryKeyValue())\n ).on(self.transaction)\n\n self.__dict__.update(kw)", "def mmo_execute_update_on_mongos(self, mmo_connection, query, update_document, execution_database, collection, is_update_one=True, upsert=False):\n mongos_server = self.mmo_mongos_servers(mmo_connection)[0]\n hostname, port = mongos_server[\"hostname\"], mongos_server[\"port\"]\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n c = self.mmo_connect_mongos(hostname,\n port,\n auth_dic[\"username\"],\n auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if is_update_one:\n update_output = c[execution_database][collection].find_one_and_update(query, update_document, return_document=ReturnDocument.AFTER, upsert=upsert)\n else:\n update_output = c[execution_database][collection].find_many_and_update(query, update_document, return_document=ReturnDocument.AFTER, upsert=upsert)\n return update_output", "def my_find_update(the_coll, search_dict, update_dict):\n x = the_coll.find(search_dict,limit=1)\n if x.count() == 0:\n the_coll.insert(update_dict)\n else:\n for x in the_coll.find(search_dict):\n x.update(update_dict)\n the_coll.save(x)", "def find_and_modify(self, query={}, update=None, upsert=False, sort=None,\n **kwargs):\n if (not update and not kwargs.get('remove', None)):\n 
raise ValueError(\"Must either update or remove\")\n if (update and kwargs.get('remove', None)):\n raise ValueError(\"Can't do both update and remove\")\n if query:\n kwargs['query'] = query\n if update:\n kwargs['update'] = update\n if upsert:\n kwargs['upsert'] = upsert\n if sort:\n if isinstance(sort, list):\n kwargs['sort'] = helpers._index_document(sort)\n elif (isinstance(sort, OrderedDict) or isinstance(sort, dict) and\n len(sort) == 1):\n kwargs['sort'] = sort\n else:\n raise TypeError(\"sort must be a list of (key, direction) \"\n \"pairs, a dict of len 1, or an instance of \"\n \"OrderedDict\")\n out = self.database.command(\"findAndModify\", self.name, **kwargs)\n if not out['ok']:\n if out[\"errmsg\"] == \"No matching object found\":\n return None\n else:\n raise ValueError(\"Unexpected Error: %s\" % (out,))\n return out.get('value')", "def update_many(collection: Collection, query, data_to_update):\n return collection.update_many(query, {'$set': data_to_update}).matched_count", "def _batch_update(self, query, mutation):\n logger.info(\"Performing batch update on %s. Mutation: %s\", query, mutation)\n modified = 0\n for doc in self.instances.find(query):\n with lock_instance(doc['_id']):\n pre_update_doc = self.instances.find_one({'_id' : doc['_id']})\n result = self.instances.update_one({'_id': doc['_id']}, mutation)\n assert result.modified_count == 1\n modified += 1\n updated_doc = self.instances.find_one({'_id': doc['_id']})\n instance = FixtureInstance.deserialize_mongodoc(updated_doc)\n try:\n self.axdb_client.update_fixture_instance(instance.axdbdoc())\n except Exception:\n logger.exception(\"Failed to persist updates for %s. Undoing cache update\", instance)\n self.instances.replace_one({'_id' : instance.id}, pre_update_doc)\n raise\n logger.info(\"%s fixture instances modified\", modified)", "def update(self, **kwargs):\n assert kwargs, 'No fields specified for update'\n self._verify_mutation_allowed()\n fields = comma_join('`%s` = %s' % (name, arg_to_sql(expr)) for name, expr in kwargs.items())\n conditions = (self._where_q & self._prewhere_q).to_sql(self._model_cls)\n sql = 'ALTER TABLE $db.`%s` UPDATE %s WHERE %s' % (self._model_cls.table_name(), fields, conditions)\n self._database.raw(sql)\n return self", "def update_many(\n cls,\n *,\n pks: List[Union[str, int]],\n update: Dict[str, Any],\n sychronize_session: bool = False,\n ) -> None:\n if pks:\n db.session.query(cls).filter(\n getattr(cls, cls.get_primary_key()).in_(pks)\n ).update(update, synchronize_session=sychronize_session)\n db.session.commit()\n cache.delete_many(*(cls.create_cache_key(pk) for pk in pks))", "def update(cls, *lst, **dct):\n cls.runtime.set_set(lst, dct)\n return UpdateQuery(cls.runtime)", "def update_all(self, request):\n\n schema = self.session.info['schema']\n\n for item in self.query().filter_by(schema=schema):\n self.session.delete(item)\n\n for item in ElectionCollection(self.session).query():\n self.update(item, request)\n\n for item in ElectionCompoundCollection(self.session).query():\n self.update(item, request)\n\n for item in VoteCollection(self.session).query():\n self.update(item, request)", "def bulk_update(self, iterable):\n inserted, updated = [], []\n for d, h in iterable:\n if d in self:\n updated.append((d, h))\n else:\n inserted.append((d, h))\n self._update_bulk(updated)\n self._insert_bulk(inserted)", "def update(self, data, id_obj=None, query_data=None):\n if id_obj:\n return self.collection.update({'_id': id_obj}, {\"$set\": data})\n return 
self.collection.update(query_data, {\"$set\": data})", "def test_update_values(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, 6 if i == 3 else i)\n self.assertEqual(row.text, str(i))", "def update(self):\n ret = True\n fields = self.depopulate(True)\n q = self.query\n q.set_fields(fields)\n\n pk = self.pk\n if pk:\n q.is_field(self.schema.pk.name, pk)\n\n else:\n raise ValueError(\"You cannot update without a primary key\")\n\n if q.update():\n fields = q.fields\n self._populate(fields)\n\n else:\n ret = False\n\n return ret", "def test_update_values(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == (6 if i == 3 else i)\r\n assert row.text == str(i)", "def update_element(cls, condition=None, async=True, **kwargs):\n\n command = cls.__table__.update().values(**kwargs)\n\n if condition is not None:\n command = command.where(condition)\n\n return DBConnection.execute_command(command=command, async=async)", "def paginated_update(\n query: Query,\n print_page_progress: Optional[Union[Callable[[int, int], None], bool]] = None,\n batch_size: int = DEFAULT_BATCH_SIZE,\n) -> Iterator[Any]:\n start = 0\n count = query.count()\n session: Session = inspect(query).session\n if print_page_progress is None or print_page_progress is True:\n print_page_progress = lambda current, total: print(\n f\" {current}/{total}\", end=\"\\r\"\n )\n while start < count:\n end = min(start + batch_size, count)\n for obj in query[start:end]:\n yield obj\n session.merge(obj)\n session.commit()\n if print_page_progress:\n print_page_progress(end, count)\n start += batch_size", "def update(self, fieldupdate='abc', condition='INVALID'):\n sql = self.generate_update_sql(fieldupdate, condition)\n self.sqlhistory.append(sql)\n return self.sql_update(sql)", "def update(cls, values):\n result = yield UpdateQuery(cls, values).execute()\n returnValue(result)", "def bulk_update(self, objs, fields, batch_size=None):\n if batch_size is not None and batch_size <= 0:\n raise ValueError(\"Batch size must be a positive integer.\")\n if not fields:\n raise ValueError(\"Field names must be given to bulk_update().\")\n objs = tuple(objs)\n if any(obj.pk is None for obj in objs):\n raise ValueError(\"All bulk_update() objects must have a primary key set.\")\n fields = [self.model._meta.get_field(name) for name in fields]\n if any(not f.concrete or f.many_to_many for f in fields):\n raise ValueError(\"bulk_update() can only be used with concrete fields.\")\n if any(f.primary_key for f in fields):\n raise 
ValueError(\"bulk_update() cannot be used with primary key fields.\")\n if not objs:\n return 0\n for obj in objs:\n obj._prepare_related_fields_for_save(\n operation_name=\"bulk_update\", fields=fields\n )\n # PK is used twice in the resulting update query, once in the filter\n # and once in the WHEN. Each field will also have one CAST.\n self._for_write = True\n connection = connections[self.db]\n max_batch_size = connection.ops.bulk_batch_size([\"pk\", \"pk\"] + fields, objs)\n batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size\n requires_casting = connection.features.requires_casted_case_in_updates\n batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size))\n updates = []\n for batch_objs in batches:\n update_kwargs = {}\n for field in fields:\n when_statements = []\n for obj in batch_objs:\n attr = getattr(obj, field.attname)\n if not hasattr(attr, \"resolve_expression\"):\n attr = Value(attr, output_field=field)\n when_statements.append(When(pk=obj.pk, then=attr))\n case_statement = Case(*when_statements, output_field=field)\n if requires_casting:\n case_statement = Cast(case_statement, output_field=field)\n update_kwargs[field.attname] = case_statement\n updates.append(([obj.pk for obj in batch_objs], update_kwargs))\n rows_updated = 0\n queryset = self.using(self.db)\n with transaction.atomic(using=self.db, savepoint=False):\n for pks, update_kwargs in updates:\n rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs)\n return rows_updated", "def update_all_dict_bywhereclause(cls, updatedict, whereclause):\n return cls.dbm().modelclass_update_all_dict_bywhereclause(cls, updatedict, whereclause)", "def update( # type: ignore[override]\n self,\n query: Union[\"Update\", str],\n initNs: Dict[str, Any] = {}, # noqa: N803\n initBindings: Dict[\"str\", \"Identifier\"] = {},\n queryGraph: Optional[str] = None,\n DEBUG: bool = False,\n ):\n if not self.update_endpoint:\n raise Exception(\"Update endpoint is not set!\")\n\n self.debug = DEBUG\n assert isinstance(query, str)\n query = self._inject_prefixes(query, initNs)\n\n if self._is_contextual(queryGraph):\n if TYPE_CHECKING:\n # _is_contextual will never return true if context is None\n assert queryGraph is not None\n query = self._insert_named_graph(query, queryGraph)\n\n if initBindings:\n # For INSERT and DELETE the WHERE clause is obligatory\n # (http://www.w3.org/TR/2013/REC-sparql11-query-20130321/#rModify)\n # Other query types do not allow variables and don't\n # have a WHERE clause. 
This also works for updates with\n # more than one INSERT/DELETE.\n v = list(initBindings)\n values = \"\\nVALUES ( %s )\\n{ ( %s ) }\\n\" % (\n \" \".join(\"?\" + str(x) for x in v),\n \" \".join(self.node_to_sparql(initBindings[x]) for x in v),\n )\n\n query = self.where_pattern.sub(\"WHERE { \" + values, query)\n\n self._transaction().append(query)\n if self.autocommit:\n self.commit()", "def update(self, dict=None, **kwargs):\n data = {}\n if dict:\n data.update(dict, **kwargs)\n else:\n data.update(**kwargs)\n self.multi_set(data)", "def executeUpdate(payload, newQuery):\r\n\tq = newQuery.format(**payload)\r\n\tdb.execute(q)\r\n\tdata = db.fetchall()\r\n\treturn data", "def update(self, data: Union[QueryWithResponse, List[QueryWithResponse]], **kwargs):\n raise NotImplementedError", "def update(self, tablename, values, condition):\n with self.engine.connect() as conn:\n try:\n update_stmt = update(tablename).values(values)\n if condition:\n update_stmt = update_stmt.where(condition)\n result = conn.execute(update_stmt)\n except SQLAlchemyError as e:\n print(\"DB update error: {}\".format(e))", "async def update_one(\n self, query: Union[dict, MotycQuery] = None,\n _id=None, *,\n update: Union[dict, MotycQuery],\n inject_default_id=False\n ) -> T:\n\n mongo_query = self.build_mongo_query(query, _id=_id)\n update_query = self.build_mongo_query(update)\n\n document = await self.collection.find_one_and_update(\n mongo_query,\n update_query,\n return_document=ReturnDocument.AFTER\n )\n\n if document is None: raise NotFound(mongo_query)\n\n return self.parse_document(document, inject_default_id=inject_default_id)", "def update(self, obj, data):\n self.get(obj[self.model.pk_field.name])\n self.validate_fields(data)\n\n fields = []\n values = []\n\n for k, v in data.iteritems():\n if k in self.model.get_fields_name():\n fields.append(k)\n values.append(v)\n\n conn = self.get_connector()\n cursor = conn.cursor()\n update = \" ,\".join([\"{0}='{1}'\".format(f, v) for f, v in zip(fields,\n values)])\n query = \"update {0} set {1} WHERE {2}={3}\".format(\n self.ressource_config[\"table\"],\n update,\n self.model.pk_field.name,\n obj[self.model.pk_field.name]\n )\n\n cursor.execute(query)\n conn.commit()\n conn.close()\n\n return self.get(obj[self.model.pk_field.name])", "def test_update_all(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 13})\n assert n_updated == 3\n items = list(test_store.get_by())\n\n andy.age = pandy.age = 13\n assert andy in items\n assert pandy in items\n assert candy in items", "def update( d, **kw):\n d.update( d, **kw )\n return d", "def run_update_example():\n table = \"actors\"\n update_values = {\n 'name': \"Christopher\",\n 'last_name': \"Eccleston\"}\n update_conds = {'id': 1}\n print querify.update_from_dict(table, update_values, update_conds)", "def sql_filtered_update(table, set_columns, where_columns, values):\n for index in range(len(set_columns) - 1, -1, -1):\n if values[index] is None:\n del set_columns[index]\n del values[index]\n set_columns = [col + ' = ?' for col in set_columns]\n columns_to_set = ', '.join(set_columns)\n where_columns = [col + ' = ?' 
for col in where_columns]\n where_condition = ' AND '.join(where_columns)\n query = f'UPDATE {table} SET {columns_to_set} WHERE {where_condition}'\n return query, values", "def parse_update(query: dict, dataset: Dataset, default_graph: str, context: dict, as_of: Optional[datetime] = None) -> Tuple[PreemptableIterator, dict]:\n # TODO change that, only used for testing\n consistency_level = \"serializable\"\n # consistency_level = dataset._config[\"consistency\"] if \"consistency\" in dataset._config else \"atomic_per_row\"\n operations = translateUpdate(parseUpdate(query))\n if len(operations) > 1:\n raise UnsupportedSPARQL(\"Only a single INSERT DATA/DELETE DATA is permitted by query. Consider sending yourt query in multiple SPARQL queries.\")\n operation = operations[0]\n if operation.name == 'InsertData' or operation.name == 'DeleteData':\n # create RDF quads to insert/delete into/from the default graph\n quads = get_quads_from_update(operation, default_graph)\n # build the preemptable update operator used to insert/delete RDF triples\n if operation.name == 'InsertData':\n return InsertOperator(quads, dataset), dict()\n else:\n return DeleteOperator(quads, dataset), dict()\n elif operation.name == 'Modify':\n where_root = operation.where\n # unravel shitty things chained together\n if where_root.name == 'Join':\n if where_root.p1.name == 'BGP' and len(where_root.p1.triples) == 0:\n where_root = where_root.p2\n elif where_root.p2.name == 'BGP' and len(where_root.p2.triples) == 0:\n where_root = where_root.p1\n\n # for consistency = serializable, use a SerializableUpdate iterator\n if consistency_level == \"serializable\":\n # build the read iterator\n cardinalities = list()\n read_iterator = parse_query_node(where_root, dataset, [default_graph], context, cardinalities, as_of=as_of)\n # get the delete and/or insert templates\n delete_templates = list()\n insert_templates = list()\n if operation.delete is not None:\n delete_templates = get_quads_from_update(operation.delete, default_graph)\n if operation.insert is not None:\n insert_templates = get_quads_from_update(operation.insert, default_graph)\n\n # build the SerializableUpdate iterator\n return SerializableUpdate(dataset, read_iterator, delete_templates, insert_templates), cardinalities\n else:\n # Build the IF EXISTS style query from an UPDATE query with bounded RDF triples\n # in the WHERE, INSERT and DELETE clause.\n\n # assert that all RDF triples from the WHERE clause are bounded\n if_exists_quads = where_root.triples\n for s, p, o in if_exists_quads:\n if type(s) is Variable or type(s) is BNode or type(p) is Variable or type(p) is BNode or type(o) is Variable or type(o) is BNode:\n raise UnsupportedSPARQL(\"Only INSERT DATA and DELETE DATA queries are supported by the SaGe server. 
For evaluating other type of SPARQL UPDATE queries, please use a Sage Smart Client.\")\n # localize all triples in the default graph\n if_exists_quads = list(localize_triples(where_root.triples, [default_graph]))\n\n # get the delete and/or insert triples\n delete_quads = list()\n insert_quads = list()\n if operation.delete is not None:\n delete_quads = get_quads_from_update(operation.delete, default_graph)\n if operation.insert is not None:\n insert_quads = get_quads_from_update(operation.insert, default_graph)\n\n # build the UpdateSequenceOperator operator\n if_exists_op = IfExistsOperator(if_exists_quads, dataset, as_of)\n delete_op = DeleteOperator(delete_quads, dataset)\n insert_op = DeleteOperator(insert_quads, dataset)\n return UpdateSequenceOperator(if_exists_op, delete_op, insert_op), dict()\n else:\n raise UnsupportedSPARQL(\"Only INSERT DATA and DELETE DATA queries are supported by the SaGe server. For evaluating other type of SPARQL UPDATE queries, please use a Sage Smart Client.\")", "def update_many(\n self,\n *args: Union[dict, Mapping],\n session: Optional[ClientSession] = None\n ) -> UpdateMany:\n return self.update(*args, session=session)", "def _merge_by_query(self, obj_dict):\n _res = self.__session.query(obj_dict[\"class\"]).filter_by(**obj_dict[\"query_dict\"]).first()\n\n if _res is None:\n self._add(obj_dict[\"instance\"])\n else:\n if hasattr(obj_dict[\"instance\"], 'attributes') and \\\n hasattr(obj_dict[\"instance\"], 'p_key'):\n for attr in obj_dict[\"instance\"].attributes:\n if attr not in obj_dict[\"instance\"].p_key:\n setattr(_res, attr, getattr(obj_dict[\"instance\"], attr))\n # updating the instance\n obj_dict[\"instance\"] = _res\n else:\n raise AttributeError(\"Class variable (attributes / p_key) not set for %s\" %\n (obj_dict[\"instance\"],))", "def update_query(self, **updates):\r\n self._url_updates.update(updates)", "def set_many(self, update_dict):\n for key, value in update_dict.items():\n # We just call `set` directly here, because this is an in-memory representation\n # thus we don't concern ourselves with bulk writes.\n self.set(key, value)", "def bulk_update(self, iterable):\n inserted, updated = [], []\n for d, h in iterable:\n if -d in self:\n updated.append((-d, h))\n else:\n inserted.append((-d, h))\n self._update_bulk(updated)\n self._insert_bulk(inserted)", "def test_result_query_update_criteria(cbcsdk_mock):\n api = cbcsdk_mock.api\n query = api.select(Result).run_id(2).update_criteria(\"my.key.dot.notation\", [\"criteria_val_1\"])\n query = query.update_criteria(\"my.key.dot.notation\", [\"criteria_val_2\"])\n assert query._build_request(start=0, rows=100) == {\"criteria\": {\n \"my.key.dot.notation\": [\"criteria_val_1\", \"criteria_val_2\"]\n }, \"start\": 0, \"rows\": 100, \"query\": \"\"}", "def update(self):\r\n self.data = [self.make_item_tuple(i) for i in self.query]\r\n self._fetched = True\r\n query_cache.set(self.iden, self.data)", "def test_map_update_updates(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition, cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'})\n TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).update(\n text_map__update={\"bar\": '3', \"baz\": '4'})\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '1', \"bar\": '3', \"baz\": '4'})", "def test_update_multiple(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 14}, age=12)\n 
assert n_updated == 2\n items = list(test_store.get_by())\n\n andy.age = pandy.age = 14\n assert andy in items\n assert pandy in items\n assert candy in items", "def update(self):\n values = {}\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if not attr.auto_value and attr._updated:\n values[field] = getattr(self, field)\n object.__setattr__(attr, '_updated', False)\n result = self.where({self.__class__.get_primary():self.primary})\n if len(values) == 0:\n logging.warning('update() called on model with no changed fields.')\n return None\n return result.update(values)[0]", "def test_map_update_updates(self):\r\n partition = uuid4()\r\n cluster = 1\r\n TestQueryUpdateModel.objects.create(\r\n partition=partition, cluster=cluster,\r\n text_map={\"foo\": '1', \"bar\": '2'})\r\n TestQueryUpdateModel.objects(\r\n partition=partition, cluster=cluster).update(\r\n text_map__update={\"bar\": '3', \"baz\": '4'})\r\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n self.assertEqual(obj.text_map, {\"foo\": '1', \"bar\": '3', \"baz\": '4'})", "def test_update_calls() -> None:\n expected_row = [Model(pk1=\"1234\", name=\"test\")]\n mock_session = UnifiedAlchemyMagicMock(\n data=[\n (\n [\n mock.call.query(Model),\n mock.call.filter(Model.pk1 == 3),\n ],\n expected_row,\n )\n ]\n )\n # Test all()\n actual_row = mock_session.query(Model).filter(Model.pk1 == 3).all()\n assert expected_row == actual_row\n mock_session.query(Model).filter(Model.pk1 == 3).update({\"pk1\": 3})\n actual_row = mock_session.query(Model).filter(Model.pk1 == 3).all()\n assert expected_row == actual_row\n # Test delete()\n assert None is mock_session.query(Model).filter(Model.pk1 == 3).update(\n {\"pk1\": 3}, synchronize_session=\"evaluate\"\n )\n deleted_count = mock_session.query(Model).filter(Model.pk1 == 3).delete()\n assert 1 == deleted_count\n actual_row = mock_session.query(Model).filter(Model.pk1 == 3).all()\n assert [] == actual_row", "def update_one(collection: Collection, query, data_to_update):\n return collection.update_one(query, {'$set': data_to_update}).matched_count == 1", "def update(table_name, record_id=None, filters=None, updates=None):\n if not filters:\n filters = {}\n if not updates:\n updates = {}\n\n with get_connection() as conn:\n if record_id:\n return rethink.table(table_name).get(record_id)\\\n .update(updates).run(conn)\n else:\n return rethink.table(table_name).filter(filters)\\\n .update(updates).run(conn)", "def update(self, dt):\n for obj in self.objects:\n obj.update(dt)", "def upsert(saved_query):\n saved_query.save()\n return saved_query", "def test_updating_multiple_records_through_filter_with_arg_value(self, test_domain):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update_all({\"last_name\": \"Fraud\"})\n )\n\n # Query and check if only the relevant records have been 
updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def send_update(self, statement):\n msg_type, msg = self.protocol.build_update(statement)\n self._execute_prepared_pipeline(msg_type, msg, statement)\n return Result(self)", "def test_mixed_value_and_null_update(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, 6 if i == 3 else i)\n self.assertEqual(row.text, None if i == 3 else str(i))", "def update(self, target, query):\n node = self._data[target]\n name = \"%s node %.8s\" % (node['type'], target)\n\n query.update({\n 'type': node['type'],\n 'model': node['model']\n })\n\n logger.info(\"Validating query\")\n NodeValidator.validate(query)\n\n self._data[target] = dict_update(node, query, name)\n logger.info(\"Updated parameters above of %s\" % name)\n\n return {target: self._data[target]}", "def update(cls, row_id, **kwargs):\n cls.delete(row_id)\n # obj = cls.query.filter_by(id=row_id).first()\n # for k, v in kwargs.items():\n # obj[k] = v\n # obj = cls.query.filter_by(id=row_id).update(kwargs)\n kwargs[\"id\"] = row_id\n obj = cls(**kwargs)\n #print(\"the type of updated object is\", type(obj))\n return commit(obj)", "def set_updates(self, updates: dict, dry=False):\n self._updates += (UpdateQueryExpression(updates),)\n return self", "def updated_query(request, *args):\n # NOTE: it returns a dict not a QueryDict\n\n # recall query_to_dict returns key-val sequence\n # filter out the search key\n updated = {k: v for k, v in query_to_dict(request.GET.copy()) if\n k != \"search\"}\n\n # the args must at least have a key + value\n if len(args) < 2:\n return updated\n\n # helper function to update key-in\n def key_in(dic, keys, val):\n k = keys[0]\n # TODO : broken in the sense that I seem to be only updating\n # lists\n if len(keys) == 1:\n if isinstance(dic[k], list) and val not in dic[k]:\n dic[k].append(val)\n else:\n key_in(dic[k], keys[1:], val)\n\n # call key_in to update\n key_in(updated, args[:-1], args[-1])\n\n # return the updated dict (NOTE: this is not\n # a query dict\n return updated", "def test_mixed_value_and_null_update(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None)\r\n\r\n for i, row in 
enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == (6 if i == 3 else i)\r\n assert row.text == (None if i == 3 else str(i))", "def sql(self):\n\n if not self._table_names:\n raise ValueError('UPDATE requires at least one table')\n if not self._values and not self._values_raw:\n raise ValueError('UPDATE requires at least one value')\n\n table_refs = [', '.join(self._table_names)]\n param_values = []\n col_names = []\n inline_values = []\n set_values = []\n\n self._append_join_table_refs(self._table_names[0], table_refs)\n\n if self._values:\n for col, val in self._values.iteritems():\n col_names.append(col)\n self._parameterize_values(val, inline_values, param_values)\n\n for col in self._values_raw:\n val, val_params = self._values_raw[col]\n col_names.append(col)\n inline_values.append(val)\n if val_params is not None and self.placeholder:\n param_values.extend(val_params)\n\n assert len(col_names) == len(inline_values)\n for col, val in zip(col_names, inline_values):\n set_values.append(u'{0}={1}'.format(self.quote_col_ref(col), val))\n\n # MySQL UPDATE syntax as of 5.7:\n #\n # Single-table syntax:\n #\n # UPDATE [LOW_PRIORITY] [IGNORE] table_reference\n # SET col_name1={expr1|DEFAULT} [, col_name2={expr2|DEFAULT}] ...\n # [WHERE where_condition]\n # [ORDER BY ...]\n # [LIMIT row_count]\n #\n # Multiple-table syntax:\n #\n # UPDATE [LOW_PRIORITY] [IGNORE] table_references\n # SET col_name1={expr1|DEFAULT} [, col_name2={expr2|DEFAULT}] ...\n # [WHERE where_condition]\n\n sql = ['UPDATE']\n\n if self.query_options:\n sql.extend(self.query_options)\n\n if self.ignore_error:\n sql.append('IGNORE')\n\n sql.append(' '.join(table_refs))\n\n sql.append('SET')\n sql.append(', '.join(set_values))\n\n if self._where_cond_root.has_conds:\n sql.append('WHERE')\n sql.append(self._where_cond_root.sql(param_values))\n\n if self._orderby_conds:\n if len(self._table_names) + len(self._join_refs) > 1:\n raise ValueError('Multiple-table UPDATE does not support ORDER BY')\n\n sql.append('ORDER BY')\n sql.append(', '.join(self._orderby_conds))\n\n if self._limit:\n if len(self._table_names) + len(self._join_refs) > 1:\n raise ValueError('Multiple-table UPDATE does not support LIMIT')\n\n sql.append('LIMIT {0}'.format(self._limit))\n\n if self.placeholder:\n return ' '.join(sql), param_values if param_values else None\n assert not param_values\n return ' '.join(sql)", "def update(self):\n sql = 'UPDATE {} SET {} where {}=%s'.format(\n self.TABLE_NAME,\n ', '.join(map(lambda f: '{}=%s'.format(f), self._dict)),\n self.PRIMARY_KEY\n )\n args = list(map(self._get_value_or_default, self._dict))\n args.append(self._get_value_or_default(self.PRIMARY_KEY))\n cursor = yield self._pool.execute(sql, args)\n count = cursor.rowcount\n result = True if count == 1 else False\n return result", "def update_all(collection: Collection, data_to_update):\n return collection.update_many({}, {'$set': data_to_update}).matched_count", "def _update(self, data: Dict[str, Any], fields_to_modify: List[str]):\n\n to_set = [\n \"{0} = %({0})s\".format(f) for f in fields_to_modify\n if f in self.editable_fields\n ]\n if len(to_set) == 0:\n print('Warning: No data to set', data)\n return\n\n query = \"UPDATE {} SET {} WHERE {}\".format(\n self._NAME,\n ', '.join(to_set),\n ' AND '.join(\"{0} = %({0})s\".format(f) for f in self.primary_fields),\n )\n self._execute(query, data)", "def set_many(self, update_dict):\n for key, value in update_dict.items():\n self.set(key, value)", 
"def test_updating_multiple_records_through_filter_with_kwarg_value(\n self, test_domain\n ):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update_all(last_name=\"Fraud\")\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def update(self, *args):\n qry = UpdateEntityQuery(self)\n self.context.add_query(qry)\n return self", "def update_db_record(self, update_body: dict):\n for attribute, value in update_body.items():\n if attribute in self._update_allowed_fields:\n setattr(self, attribute, value)\n self.updated_at = datetime.now()\n self.save()", "def _update_can_read_query(\n query, user, workspace_filter=None, user_filter=None\n):\n\n accessible_workspaces = _get_read_accessible_workspaces_by_user(user)\n # update query with workspace criteria\n query = django_raw_query.add_access_criteria(\n query, accessible_workspaces, user, workspace_filter, user_filter\n )\n return query", "async def modify(\n self, item: T, update: Union[dict, MotycQuery], *,\n inject_default_id: bool = None,\n ) -> T:\n\n assert isinstance(item, BaseModel), \"Can only handle BaseModel, not dict i.g.\"\n\n document = item.dict(by_alias=True)\n\n assert document.get(self.identity) is not None, f\"Need identity ({self.identity}) to update model.\"\n\n return await self.update_one(\n {self.identity: document[self.identity]},\n update,\n inject_default_id=inject_default_id\n )", "def update_db(table, set, wherecond):\n query = \"UPDATE \" + table + \" SET \" + set + \" WHERE \" + wherecond\n print(query)\n cursor.execute(query)\n db.commit()\n print(cursor.rowcount, \"record updated in db: \" + table)", "def update_view(self):\n for row in self.view.obj_list:\n for obj in row:\n obj._update(self.model)", "async def update_one(\n self,\n update_document: Dict[str, Any],\n *,\n filter: Optional[Dict[str, Any]] = DEFAULT_FILTER,\n session: Optional[Any] = DEFAULT_SESSION,\n **kwargs: Any,\n ) -> UpdateResult:\n return await self._database.update_one(\n self.name,\n update_document=update_document,\n filter=filter,\n session=session,\n **kwargs,\n )", "def updateAll(self):\n \tself.idToUpdate=''\n \tself.newState=''\n \tself.save()", "def update(self, _values=None, **values):\n if _values is not None:\n values.update(_values)\n\n values = OrderedDict(sorted(values.items()))\n\n bindings = list(values.values()) + self.get_bindings()\n\n sql = 
self._grammar.compile_update(self, values)\n\n return self._connection.update(sql, self._clean_bindings(bindings))", "def test_do_update_all(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n\r\n _error_code, _msg = DUT.do_update_all()\r\n\r\n assert _error_code == 0\r\n assert _msg == (\"RAMSTK SUCCESS: Updating all records in the \"\r\n \"function table.\")", "def update(self, **kwargs):\n return self._object.update(meta=kwargs)", "def update(self, table_name, fields, keys=None, any=False, eids=None):\n table = self.db.table(table_name)\n if eids is not None:\n LOGGER.debug(\"%r: update(%r, eids=%r)\" % (table_name, fields, eids))\n if isinstance(eids, list):\n return table.update(fields, eids=eids)\n else:\n return table.update(fields, eids=[eids])\n else:\n LOGGER.debug(\"%r: update(%r, keys=%r)\" % (table_name, fields, keys))\n return table.update(fields, self._getQuery(keys, any))", "def update_many(objects, fields=[], using=\"default\"):\n if not objects:\n return\n\n import django.db.models\n from django.db import connections\n con = connections[using]\n\n names = fields\n meta = objects[0]._meta\n fields = [f for f in meta.fields if not isinstance(f, django.db.models.AutoField) and (not names or f.name in names)]\n\n if not fields:\n raise ValueError(\"No fields to update, field names are %s.\" % names)\n\n fields_with_pk = fields + [meta.pk]\n parameters = []\n for o in objects:\n parameters.append(tuple(f.get_db_prep_save(f.pre_save(o, True), connection=con) for f in fields_with_pk))\n\n table = meta.db_table\n assignments = \",\".join((\"%s=%%s\"% con.ops.quote_name(f.column)) for f in fields)\n con.cursor().executemany(\n \"update %s set %s where %s=%%s\" % (table, assignments, meta.pk.column),\n parameters)", "def test_update_using_positional_operator(self):\n\n class Comment(EmbeddedDocument):\n by = StringField()\n votes = IntField()\n\n class BlogPost(Document):\n title = StringField()\n comments = ListField(EmbeddedDocumentField(Comment))\n\n BlogPost.drop_collection()\n\n c1 = Comment(by=\"joe\", votes=3)\n c2 = Comment(by=\"jane\", votes=7)\n\n BlogPost(title=\"ABC\", comments=[c1, c2]).save()\n\n BlogPost.objects(comments__by=\"jane\").update(inc__comments__S__votes=1)\n\n post = BlogPost.objects.first()\n assert post.comments[1].by == \"jane\"\n assert post.comments[1].votes == 8", "def update(self, **kwargs):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"update {}\".format(item))\n item.update(**kwargs)", "def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):\n if nowait and skip_locked:\n raise ValueError(\"The nowait option cannot be used with skip_locked.\")\n obj = self._chain()\n obj._for_write = True\n obj.query.select_for_update = True\n obj.query.select_for_update_nowait = nowait\n obj.query.select_for_update_skip_locked = skip_locked\n obj.query.select_for_update_of = of\n obj.query.select_for_no_key_update = no_key\n return obj", "def update_postgres(schema, table, values, where_clause):\n conn = logic_db_connection()\n try:\n cur = conn.cursor()\n insert_dml = \"\"\"UPDATE {0}.{1}\n SET {2}\n WHERE {3}\n ;\"\"\".format(schema, table, values, where_clause)\n cur.execute(insert_dml)\n conn.commit()\n except Exception as e:\n print(f'Unable to update Postgres table {table}. 
DML: {insert_dml} Error: {e}')\n raise\n finally:\n conn.close()\n return", "def update_document(collection: str, query: dict, data: dict) -> None:\n validate_arguments({'collection': [collection, str],\n 'query': [query, dict],\n 'data': [data, dict]})\n new_document = find_document(collection, query=query)\n if new_document is None:\n raise Exception('Didnt find a document to update')\n DB[collection].delete_one(query)\n for key in data:\n new_document[key] = data[key]\n add_document(collection, new_document)" ]
[ "0.6956026", "0.6809811", "0.67811257", "0.6622099", "0.6616726", "0.6503182", "0.6379676", "0.63170165", "0.62703687", "0.62077737", "0.6169037", "0.61661786", "0.61275154", "0.6101097", "0.60837984", "0.6061122", "0.60530907", "0.6043129", "0.60077256", "0.5953688", "0.59471816", "0.5946596", "0.59356356", "0.5913326", "0.5871918", "0.5859458", "0.5853548", "0.5778073", "0.5746603", "0.5741358", "0.57352155", "0.5731725", "0.5715639", "0.5701526", "0.5687051", "0.5658797", "0.56422794", "0.56380767", "0.56347257", "0.5621815", "0.5610509", "0.5609754", "0.55874205", "0.5574745", "0.55628586", "0.5557369", "0.55173355", "0.54873633", "0.5479277", "0.5472755", "0.54654574", "0.5463995", "0.545817", "0.5436212", "0.54175484", "0.5397223", "0.53968424", "0.5382417", "0.5381178", "0.5380198", "0.5353972", "0.5344179", "0.5342544", "0.5326088", "0.5311926", "0.5309231", "0.53006345", "0.5295337", "0.52951", "0.5292844", "0.5279151", "0.52765137", "0.5250888", "0.5249628", "0.52144194", "0.5207528", "0.51989716", "0.51881105", "0.51777315", "0.5172665", "0.517209", "0.5169096", "0.51683354", "0.5146652", "0.5135561", "0.5133284", "0.5129675", "0.5128122", "0.5106673", "0.5088276", "0.5081975", "0.50793046", "0.5078123", "0.5066836", "0.50659984", "0.50519943", "0.5047307", "0.50448394", "0.50354236", "0.50279194", "0.5023122" ]
0.0
-1
Given a primary key, update the referenced object according to the update clause
def test_update_one(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(cls, row_id, **kwargs):\n cls.delete(row_id)\n # obj = cls.query.filter_by(id=row_id).first()\n # for k, v in kwargs.items():\n # obj[k] = v\n # obj = cls.query.filter_by(id=row_id).update(kwargs)\n kwargs[\"id\"] = row_id\n obj = cls(**kwargs)\n #print(\"the type of updated object is\", type(obj))\n return commit(obj)", "def update(self):\n ret = True\n fields = self.depopulate(True)\n q = self.query\n q.set_fields(fields)\n\n pk = self.pk\n if pk:\n q.is_field(self.schema.pk.name, pk)\n\n else:\n raise ValueError(\"You cannot update without a primary key\")\n\n if q.update():\n fields = q.fields\n self._populate(fields)\n\n else:\n ret = False\n\n return ret", "def update(self, obj, data):\n self.get(obj[self.model.pk_field.name])\n self.validate_fields(data)\n\n fields = []\n values = []\n\n for k, v in data.iteritems():\n if k in self.model.get_fields_name():\n fields.append(k)\n values.append(v)\n\n conn = self.get_connector()\n cursor = conn.cursor()\n update = \" ,\".join([\"{0}='{1}'\".format(f, v) for f, v in zip(fields,\n values)])\n query = \"update {0} set {1} WHERE {2}={3}\".format(\n self.ressource_config[\"table\"],\n update,\n self.model.pk_field.name,\n obj[self.model.pk_field.name]\n )\n\n cursor.execute(query)\n conn.commit()\n conn.close()\n\n return self.get(obj[self.model.pk_field.name])", "def update_row(self, pk, row_dict):\n return self.execute(self.commands.update_row(\n self.name,\n col_val=self._join_equality(row_dict),\n pk_col=self.primary_key_column,\n pk=pk\n ))", "def update(self, request, pk):\n if pk is None:\n for item in request.data:\n # get object by its primary key\n obj = self._object_get(item[self.model._meta.pk.attname])\n self._object_update(obj, item)\n else:\n obj = self._object_get(pk)\n self._object_update(obj, request.data)\n return obj", "def update(self, **kw):\n colmap = {}\n for k, v in kw.iteritems():\n colmap[self.__attrmap__[k]] = v\n\n yield Update(\n colmap,\n Where=self._primaryKeyComparison(self._primaryKeyValue())\n ).on(self.transaction)\n\n self.__dict__.update(kw)", "def update(self, async=True):\n\n self.update_element(\n condition=QueryBuilder.build_pk_clause(self.__table__, **self.get_pk_fields()),\n async=async, **self.get_non_pk_fields(filtered=True)\n )", "def update(self, openid=None, **kwargs):\n assert openid\n\n with db.session.begin_nested():\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)\n db.session.merge(self)\n db.session.commit()\n return self", "def _update(self, model_obj):\n conn = self._get_session()\n db_item = None\n\n # Fetch the record from database\n try:\n identifier = getattr(model_obj, id_field(self.entity_cls).attribute_name)\n db_item = conn.query(self.model_cls).get(\n identifier\n ) # This will raise exception if object was not found\n except DatabaseError as exc:\n logger.error(f\"Database Record not found: {exc}\")\n raise\n\n if db_item is None:\n conn.rollback()\n conn.close()\n raise ObjectNotFoundError(\n {\n \"_entity\": f\"`{self.entity_cls.__name__}` object with identifier {identifier} \"\n f\"does not exist.\"\n }\n )\n\n # Sync DB Record with current changes. 
When the session is committed, changes are automatically synced\n try:\n for attribute in attributes(self.entity_cls):\n if attribute != id_field(self.entity_cls).attribute_name and getattr(\n model_obj, attribute\n ) != getattr(db_item, attribute):\n setattr(db_item, attribute, getattr(model_obj, attribute))\n except DatabaseError as exc:\n logger.error(f\"Error while updating: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return model_obj", "def update(self):\n values = {}\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if not attr.auto_value and attr._updated:\n values[field] = getattr(self, field)\n object.__setattr__(attr, '_updated', False)\n result = self.where({self.__class__.get_primary():self.primary})\n if len(values) == 0:\n logging.warning('update() called on model with no changed fields.')\n return None\n return result.update(values)[0]", "def _update_internal(self, entity_id, data, commit=True):\n input_data = self.to_model(data)\n self.validate_present(input_data)\n if not input_data:\n raise UnprocessableEntity(\"Can not update using empty data.\")\n entity = db_session.query(self.model).get(entity_id)\n if not entity:\n raise NotFound(\"Could not find any entity with specified parameters.\")\n\n for k, v in input_data.items():\n try:\n setattr(entity, k, v)\n except ValueError as e:\n raise UnprocessableEntity(f\"Could not save value.\", fields=k, what=BAD_VALUE) from e\n\n if commit:\n db_session.commit()\n \n return self.to_obj(entity)", "def update(self, key, node, local_edges, foreign_edges, transaction_id):\n assert self.row_exists(key, transaction_id), \"Key does not exist\"\n\n last_node = self.rows[key][-1]\n node = last_node.copy(node, local_edges, foreign_edges, transaction_id)\n self._create_or_update_row(key, node)", "def update(self, data, id_obj=None, query_data=None):\n if id_obj:\n return self.collection.update({'_id': id_obj}, {\"$set\": data})\n return self.collection.update(query_data, {\"$set\": data})", "def update(self):\n sql = 'UPDATE {} SET {} where {}=%s'.format(\n self.TABLE_NAME,\n ', '.join(map(lambda f: '{}=%s'.format(f), self._dict)),\n self.PRIMARY_KEY\n )\n args = list(map(self._get_value_or_default, self._dict))\n args.append(self._get_value_or_default(self.PRIMARY_KEY))\n cursor = yield self._pool.execute(sql, args)\n count = cursor.rowcount\n result = True if count == 1 else False\n return result", "def update(self, obj, parent=None, **attrs):\n if isinstance(obj, ObjectRow):\n object_type, object_id = obj['type'], obj['id']\n else:\n object_type, object_id = obj\n\n type_attrs = self._get_type_attrs(object_type)\n get_pickle = False\n\n # Determine which inverted indexes need to be regenerated for this\n # object. Builds a dictionary of ivtidxes with a dirty flag and\n # a list of sql columns needed for reindexing.\n ivtidx_columns = {}\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if flags & ATTR_INVERTED_INDEX:\n if attr_ivtidx not in ivtidx_columns:\n ivtidx_columns[attr_ivtidx] = [ False, [] ]\n if flags & ATTR_SEARCHABLE:\n ivtidx_columns[attr_ivtidx][1].append(name)\n if flags & (ATTR_SIMPLE | ATTR_IGNORE_CASE):\n get_pickle = True\n if name in attrs:\n ivtidx_columns[attr_ivtidx][0] = True\n\n if flags & ATTR_SIMPLE and name in attrs:\n # Simple attribute needs pickle\n get_pickle = True\n\n # TODO: if ObjectRow is supplied, don't need to fetch columns\n # that are available in the ObjectRow. 
(Of course this assumes\n # the object wasn't changed via elsewhere during the life of the\n # ObjectRow object, so maybe we don't want to do that.)\n reqd_columns = ['pickle'] if get_pickle else []\n for dirty, searchable_attrs in ivtidx_columns.values():\n if dirty:\n reqd_columns.extend(searchable_attrs)\n\n if reqd_columns:\n q = 'SELECT %s FROM objects_%s WHERE id=?' % (','.join(reqd_columns), object_type)\n row = self._db_query_row(q, (object_id,))\n if not row:\n raise ValueError, \"Can't update unknown object (%s, %d)\" % (object_type, object_id)\n if reqd_columns[0] == 'pickle' and row[0]:\n # One of the attrs we're updating is in the pickle, so we\n # have fetched it; now convert it to a dict.\n row_attrs = cPickle.loads(str(row[0]))\n for key, value in row_attrs.items():\n # Rename all __foo to foo for ATTR_IGNORE_CASE columns\n if key.startswith('__') and type_attrs[key[2:]][1] & ATTR_IGNORE_CASE:\n row_attrs[key[2:]] = value\n del row_attrs[key]\n # Update stored pickle data with new ATTR_SIMPLE attribute values\n row_attrs.update(attrs)\n attrs = row_attrs\n\n if isinstance(parent, ObjectRow):\n attrs['parent_type'], attrs['parent_id'] = parent['type'], parent['id']\n elif parent:\n attrs['parent_type'], attrs['parent_id'] = self._get_type_id(parent[0]), parent[1]\n\n attrs['id'] = object_id\n # Make copy of attrs for later query, since we're now about to mess with it.\n orig_attrs = attrs.copy()\n\n # Merge the ivtidx columns we grabbed above into attrs dict.\n for n, name in enumerate(reqd_columns):\n if name not in attrs and name != 'pickle':\n attrs[name] = row[n]\n\n for ivtidx, (dirty, searchable_attrs) in ivtidx_columns.items():\n if not dirty:\n # No attribute for this ivtidx changed.\n continue\n split = self._inverted_indexes[ivtidx]['split']\n # Remove existing indexed words for this object.\n self._delete_object_inverted_index_terms((object_type, object_id), ivtidx)\n\n # FIXME: code duplication from add()\n # Need to reindex all columns in this object using this ivtidx.\n terms_list = []\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n if attr_type == str and type(attrs[name]) == buffer:\n # We store string objects in the db as buffers, in\n # order to prevent any unicode issues. 
So we need\n # to convert the buffer we got from the db back to\n # a string before parsing the attribute into terms.\n attrs[name] = str(attrs[name])\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given, but\n # that ivtidx is not a named attribute (which would be handled\n # in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n self._add_object_inverted_index_terms((object_type, object_id), ivtidx, terms)\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n orig_attrs[ivtidx] = terms.keys()\n\n query, values = self._make_query_from_attrs(\"update\", orig_attrs, object_type)\n self._db_query(query, values)", "def _update(self, model_obj: Any):\n conn = self.provider.get_connection()\n\n identifier = model_obj.meta.id\n\n # Fetch the record from database\n try:\n # Calling `get` will raise `NotFoundError` if record was not found\n self.model_cls.get(\n id=identifier, using=conn, index=self.model_cls._index._name\n )\n except NotFoundError as exc:\n logger.error(f\"Database Record not found: {exc}\")\n raise ObjectNotFoundError(\n {\n \"_entity\": f\"`{self.entity_cls.__name__}` object with identifier {identifier} \"\n f\"does not exist.\"\n }\n )\n\n try:\n model_obj.save(\n refresh=True,\n index=self.model_cls._index._name,\n using=conn,\n )\n except Exception as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n\n return model_obj", "def update(self, commit=True, **kwargs):\n # Prevent changing IDS\n kwargs.pop('id', None)\n for attr, value in kwargs.iteritems():\n # Flask-restful makes everything None by default\n if value is not None:\n setattr(self, attr, value)\n return commit and self.save() or self", "def update(self):\r\n if self.instance is None:\r\n raise CQLEngineException(\"DML Query intance attribute is None\")\r\n assert type(self.instance) == self.model\r\n\r\n statement = UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp)\r\n #get defined fields and their column names\r\n for name, col in self.model._columns.items():\r\n if not col.is_primary_key:\r\n val = getattr(self.instance, name, None)\r\n val_mgr = self.instance._values[name]\r\n\r\n # don't update something that is null\r\n if val is None:\r\n continue\r\n\r\n # don't update something if it hasn't changed\r\n if not val_mgr.changed and not isinstance(col, Counter):\r\n continue\r\n\r\n if isinstance(col, (BaseContainerColumn, Counter)):\r\n # get appropriate clause\r\n if isinstance(col, List): klass = ListUpdateClause\r\n elif isinstance(col, Map): klass = MapUpdateClause\r\n elif isinstance(col, Set): klass = SetUpdateClause\r\n elif isinstance(col, Counter): klass = CounterUpdateClause\r\n else: raise RuntimeError\r\n\r\n # do the stuff\r\n clause = klass(col.db_field_name, val,\r\n previous=val_mgr.previous_value, column=col)\r\n if clause.get_context_size() > 0:\r\n statement.add_assignment_clause(clause)\r\n else:\r\n statement.add_assignment_clause(AssignmentClause(\r\n col.db_field_name,\r\n col.to_database(val)\r\n ))\r\n\r\n if statement.get_context_size() > 0 or self.instance._has_counter:\r\n for name, col in self.model._primary_keys.items():\r\n statement.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n 
self._execute(statement)\r\n\r\n self._delete_null_columns()", "def update(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.update(self.__class__.__name__, data['id'], data)\n\n self.__dict__.update(saved_data)", "def update(self):\n if not self.id:\n raise DataValidationError(\"Update called with empty ID field\")\n db.session.commit()\n db.session.refresh(self)", "def put(self, request, pk):\n return self.update(request, pk)", "def update(self, inRecord, keyIndex=None):\n if keyIndex == None:\n keyIndex = 0\n\n conditions = \",\".join([a + \"='\" + b + \"'\" for a,b in\n zip(self.fieldNames,inRecord)]) \n\n # Now figure out what changed, and change it in the table.\n self._db._c.execute(\"UPDATE \" + self.tableName + \" SET \" +\n conditions +\n \" WHERE \" + self.fieldNames[keyIndex] + \" = ?\",\n (inRecord[keyIndex],))\n \n self._db._conn.commit()", "def update(self):\n self.__execute(self.pkgin_bin, \"update\")", "def oe_update(self, cr, uid, external_session, existing_rec_id, vals, resource, defaults, context=None):\n if context is None: context={}\n context['referential_id'] = external_session.referential_id.id #did it's needed somewhere?\n return self.write(cr, uid, existing_rec_id, vals, context)", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "async def update(self, pk, payload):\n\n self.log.info(payload)\n await self.db.update(pk, payload)\n return await self.get_one(pk)", "def update(self, table, element):\n\n if \"id\" not in element.keys():\n return False\n fields = []\n conditions = []\n values = []\n for key in element.keys():\n if key != \"id\":\n fields.append(key)\n values.append(element[key])\n conditions.append(\"id =\")\n values.append(element[\"id\"])\n result = self.__update(table, fields, conditions, values)\n\n return result", "def update(self, table, id, **kwargs):\n pairs = [column + ' = %s' for column in kwargs.keys()]\n values = kwargs.values()\n if 'changed_by' in self.get_columns(table):\n pairs += ['changed_by = %s', 'date_changed = now()']\n values += [self.user_id]\n self.db.execute(\n 'update %s set %s where %s = %%s' %\n (table, ', '.join(pairs), table + '_id'), *(values + [id]))\n return id", "def update_by_id(cls, id, name, author_id):\n\t\tbook = Book.query.get(id)\n\t\tbook.name = name\n\t\tbook.authors_id = author_id\n\t\tdb.session.commit()", "def update(self, obj, parent=None, **attrs):\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n object_type, object_id = self._to_obj_tuple(obj)\n\n type_attrs = self._get_type_attrs(object_type)\n get_pickle = False\n\n # Determine which inverted indexes need to be regenerated for this\n # object. 
Builds a dictionary of ivtidxes with a dirty flag and\n # a list of sql columns needed for reindexing.\n ivtidx_columns = {}\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if flags & ATTR_INVERTED_INDEX:\n if attr_ivtidx not in ivtidx_columns:\n ivtidx_columns[attr_ivtidx] = [ False, [] ]\n if flags & ATTR_SEARCHABLE:\n ivtidx_columns[attr_ivtidx][1].append(name)\n if flags & (ATTR_SIMPLE | ATTR_IGNORE_CASE):\n get_pickle = True\n if name in attrs:\n ivtidx_columns[attr_ivtidx][0] = True\n\n # If the updated attribute is stored in the pickle (either a simple attr\n # or an case-insensitive indexed attr in which __foo is in the pickle)\n # then we must first retrieve the pickle for this object from the db.\n if (flags & ATTR_SIMPLE or flags & ATTR_INDEXED_IGNORE_CASE == ATTR_INDEXED_IGNORE_CASE) and \\\n name in attrs:\n get_pickle = True\n\n # TODO: if ObjectRow is supplied, don't need to fetch columns\n # that are available in the ObjectRow. (Of course this assumes\n # the object wasn't changed via elsewhere during the life of the\n # ObjectRow object, so maybe we don't want to do that.)\n reqd_columns = ['pickle'] if get_pickle else []\n for dirty, searchable_attrs in ivtidx_columns.values():\n if dirty:\n reqd_columns.extend(searchable_attrs)\n\n if reqd_columns:\n q = 'SELECT %s FROM objects_%s WHERE id=?' % (','.join(reqd_columns), object_type)\n row = self._db_query_row(q, (object_id,))\n if not row:\n raise ValueError(\"Can't update unknown object (%s, %d)\" % (object_type, object_id))\n if reqd_columns[0] == 'pickle' and row[0]:\n # One of the attrs we're updating is in the pickle, so we\n # have fetched it; now convert it to a dict.\n row_attrs = self._unpickle(row[0])\n for key, value in row_attrs.items():\n # Rename all __foo to foo for ATTR_IGNORE_CASE columns\n if key.startswith('__') and type_attrs[key[2:]][1] & ATTR_IGNORE_CASE:\n row_attrs[key[2:]] = value\n del row_attrs[key]\n # Update stored pickle data with new ATTR_SIMPLE attribute values\n row_attrs.update(attrs)\n attrs = row_attrs\n\n\n if parent:\n attrs['parent_type'], attrs['parent_id'] = self._to_obj_tuple(parent, numeric=True)\n attrs['id'] = object_id\n # Make copy of attrs for later query, since we're now about to mess with it.\n orig_attrs = attrs.copy()\n\n # Merge the ivtidx columns we grabbed above into attrs dict.\n for n, name in enumerate(reqd_columns):\n if name not in attrs and name != 'pickle':\n attrs[name] = row[n]\n\n for ivtidx, (dirty, searchable_attrs) in ivtidx_columns.items():\n if not dirty:\n # No attribute for this ivtidx changed.\n continue\n split = self._inverted_indexes[ivtidx]['split']\n # Remove existing indexed words for this object.\n self._delete_object_inverted_index_terms((object_type, object_id), ivtidx)\n\n # TODO: code duplication from add()\n # Need to reindex all columns in this object using this ivtidx.\n terms_list = []\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n if attr_type == BYTES_TYPE and isinstance(attrs[name], RAW_TYPE):\n # We store string objects in the db as buffers, in\n # order to prevent any unicode issues. 
So we need\n # to convert the buffer we got from the db back to\n # a string before parsing the attribute into terms.\n attrs[name] = BYTES_TYPE(attrs[name])\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given, but\n # that ivtidx is not a named attribute (which would be handled\n # in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n self._add_object_inverted_index_terms((object_type, object_id), ivtidx, terms)\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n if not terms and ivtidx in orig_attrs:\n # Update removed all terms for this ivtidx, remove from pickle.\n orig_attrs[ivtidx] = None\n elif terms:\n # There are terms for this ivtidx, store in pickle.\n orig_attrs[ivtidx] = list(terms.keys())\n\n query, values = self._make_query_from_attrs(\"update\", orig_attrs, object_type)\n self._db_query(query, values)\n self._set_dirty()\n # TODO: if an objectrow was given, return an updated objectrow", "def update(self, table, primaryKeyName, primaryKeyValue, options):\r\n keys = \"\"\r\n if not isinstance(options, dict):\r\n raise ValueError, \"Expected 'options' argument to be a dictionary, instead received: %s\" % type(options).__name__\r\n if options:\r\n for key, value in options.iteritems():\r\n if isinstance(key, str):\r\n key = key.replace(\"'\", \"''\")\r\n\r\n if isinstance(value, str):\r\n value = value.replace(\"'\", \"''\")\r\n keys += \"%s='%s',\" % (key, value)\r\n keys = keys[:-1]\r\n query = \"UPDATE \" + str(table) + \" SET \" + keys + \" WHERE \" + str(primaryKeyName) + \"='\" + str(primaryKeyValue) + \"'\"\r\n self.execute(query)", "def update(self, **kwargs):\n assert kwargs, 'No fields specified for update'\n self._verify_mutation_allowed()\n fields = comma_join('`%s` = %s' % (name, arg_to_sql(expr)) for name, expr in kwargs.items())\n conditions = (self._where_q & self._prewhere_q).to_sql(self._model_cls)\n sql = 'ALTER TABLE $db.`%s` UPDATE %s WHERE %s' % (self._model_cls.table_name(), fields, conditions)\n self._database.raw(sql)\n return self", "def update_obj(obj, attributes, params):\n for key in params.keys():\n if key in attributes:\n try:\n set_attribute(obj, key, params[key])\n except:\n abort(400)\n \n Session.flush()\n Session.commit()", "def update_document(obj):\n index = obj.get_index_name()\n doc_type = obj.get_document_type()\n body = dict(doc=obj.get_document_body())\n try:\n ES.update(index=index, doc_type=doc_type, body=body, id=obj.pk)\n except NotFoundError:\n raise DocumentNotFound(obj.get_index_name(), obj.pk)", "async def modify(\n self, item: T, update: Union[dict, MotycQuery], *,\n inject_default_id: bool = None,\n ) -> T:\n\n assert isinstance(item, BaseModel), \"Can only handle BaseModel, not dict i.g.\"\n\n document = item.dict(by_alias=True)\n\n assert document.get(self.identity) is not None, f\"Need identity ({self.identity}) to update model.\"\n\n return await self.update_one(\n {self.identity: document[self.identity]},\n update,\n inject_default_id=inject_default_id\n )", "def updateOne(self,ident):\n \tLOGGER.info(\"lazily updating {}\".format(ident))\n \tself.idToUpdate=ident\n \tself.newState=''\n \tself.save()", "def set_one(self, table, q_filter, update_dict, fail_on_empty=True, unset=None, pull=None, push=None,\n push_list=None, pull_list=None):\n with self.lock:\n for i, 
db_item in self._find(table, self._format_filter(q_filter)):\n updated = self._update(db_item, update_dict, unset=unset, pull=pull, push=push, push_list=push_list,\n pull_list=pull_list)\n return {\"updated\": 1 if updated else 0}\n else:\n if fail_on_empty:\n raise DbException(\"Not found entry with _id='{}'\".format(q_filter), HTTPStatus.NOT_FOUND)\n return None", "def update_by_id(cls, id, name, surname):\n\t\tauthor = Author.query.get(id)\n\t\tauthor.name = name\n\t\tauthor.surname = surname\n\t\tdb.session.commit()", "def sqlite3_update_record(data_base, table, param_column, param_value, id_column, record_id):\n con = sqlite3.connect(data_base)\n cur = con.cursor()\n try:\n query = 'UPDATE ' + table + ' SET ' + param_column + ' = \"' + param_value + '\" WHERE ' + id_column + \\\n \" = '\" + record_id + \"'\"\n cur.execute(query)\n except sqlite3.OperationalError:\n pass\n con.commit()\n cur.close()\n con.close()", "def _update_model_instance(self, obj_filter_dict, new_data_dict):\n obj = self._get_or_create_model_instance(obj_filter_dict)\n obj.modify(**new_data_dict)", "def update(self, **kwargs):\n expr = self.model.__table__.update().where(self.query).values(**kwargs)\n return self._runquery(expr)", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def _update_object(self, data_dict):\r\n pass", "def patch_record(\n self, id_: str, fields: Dict[str, Union[str, list, None]]\n ) -> None:\n instance = self._get(id_)\n instance.update(fields)\n self.db.session.commit()", "def update(self, table_name, data, id_column_name='id'):\n table = self._create_table(table_name)\n for row in data:\n try:\n statement = table.update() \\\n .where(table.c[id_column_name] == row[id_column_name]) \\\n .values(**row)\n self.cursor.execute(statement)\n except Exception as e:\n print (e)", "async def update_one(self, where, data):\n\n pass", "def update(self, an_id: id = None, where_key: str = None, name: str = None, data=None, notes: str = None,\n modified_by: str = None, created_by: str = None, my_conn: Optional[dict] = None,\n t_log: Optional[TimeLogger] = None, verbose: bool = None):\n\n if my_conn is None:\n my_conn = self.my_conn\n else:\n self.my_conn = my_conn\n\n if verbose is True and t_log is None:\n t_log = TimeLogger()\n\n my_conn = my_connect(my_conn=my_conn, t_log=t_log, verbose=verbose)\n conn = my_conn['conn']\n db_params = my_conn['db_params']\n\n if where_key is None:\n where_key = self.id_name()\n\n if an_id is None:\n warn(\"No Record ID Specified\", NoRecordIDError)\n else:\n if data is None:\n data = {}\n\n data.update(add_field('name', name))\n data.update(add_field('notes', notes))\n data.update(add_field('created_by', created_by))\n\n # If there is no data, then skip. Of course one could still change modified by:\n if len(data) > 0 or modified_by is not None:\n\n # Always require a modified by and because one can change data without specifying a modifer,\n # this is necessary. 
We don't check it before the previous if, because we don't want to create\n # a modified_by if not data was set and no modified_by was set.\n if modified_by is None:\n modified_by = db_params['user']\n\n data.update(modified_by=modified_by)\n\n fields = data.keys()\n\n sql = \"UPDATE {table} SET {fields} WHERE {pkey} = {a_value}\"\n\n if verbose:\n print('Data:\\n', data)\n print('\\nFields:\\n', fields)\n\n query = SQL(sql).format(\n table=Identifier(self.table_name),\n fields=SQL(', ').join(\n Composed([Identifier(k), SQL(' = '), Placeholder(k)]) for k in fields\n ),\n pkey=Identifier(where_key),\n a_value=Placeholder('where_key')\n )\n\n data.update(where_key=an_id)\n\n cur = conn.cursor(cursor_factory=NamedTupleCursor)\n\n if verbose:\n print(query.as_string(conn))\n print(cur.mogrify(query, data))\n\n try:\n cur.execute(query, data)\n except OperationalError as error:\n print(error)\n\n conn.commit()\n\n cur.close()\n\n self.pull_data()", "def update(self, updates, predicate):\n for row in self.rows:\n if predicate(row):\n for column, new_value in updates.items():\n row[column] = new_value", "def update_by_id(self, subject_id: str, new_subject_data: any) -> any:\n pass", "def update(self, data: dict):\n for key in data:\n model_att = getattr(self.__class__, key, None)\n value = data.get(key)\n\n setattr(self, key, type(model_att.type.python_type())(value))\n\n self.commit()\n return self", "def update(self):\n with managed_session() as session:\n session.merge(self)", "def abstract_update(self, model, id, params):\n # we check that the given fields exist\n self.check_fields_existence(model, params.keys())\n\n # we get the record and update\n record = self.abstract_get(model, id)\n record.write(self._prepare_params(params))\n\n return record", "def update_one_address(update_dict,id,id_address,con,cur):\n psql=\"update address set \"\n psql_update=\"\"\n for (key,value) in update_dict.items():\n psql_update=f\"{key}='{value}',\"+psql_update\n\n condition=f\" where extern_id='{id_address}' and extern_client_id='{id}';\"\n psql=psql+psql_update[:-1]+condition\n cur.execute(psql)\n con.commit()", "def update_one_address(update_dict,id,id_address,con,cur):\n psql=\"update address set \"\n psql_update=\"\"\n for (key,value) in update_dict.items():\n psql_update=f\"{key}='{value}',\"+psql_update\n\n condition=f\" where extern_id='{id_address}' and extern_client_id='{id}';\"\n psql=psql+psql_update[:-1]+condition\n cur.execute(psql)\n con.commit()", "def update_item(self, table_name: str, primary_key: dict, update: dict):\n table = self.resource.Table(table_name)\n\n update_expression = 'SET '\n updates = []\n for key, value in update.items():\n # Add a suffix the key to create a substitute name for it to\n # prevent conflicts with a reserved DynamoDB word.\n # Refer the following for more details:\n # - https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html\n # - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html\n substitute_key = f'#{key}_key'\n substitute_value = f':{key}_value'\n updates.append({\n 'key': key,\n 'value': value,\n 'substitute_key': substitute_key,\n 'substitute_value': substitute_value,\n })\n update_expression += f'{substitute_key} = {substitute_value}, '\n update_expression = update_expression[:-2] # remove the last ', '\n\n table.update_item(\n Key=primary_key,\n UpdateExpression=update_expression,\n ExpressionAttributeNames={\n u['substitute_key']: u['key'] for u in updates\n },\n 
ExpressionAttributeValues={\n u['substitute_value']: u['value'] for u in updates\n },\n )", "def _update(self, commit=False):\n votes = Vote.objects.filter(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n )\n obj_score = sum([v.score for v in votes])\n obj_votes = len(votes)\n\n score, created = Score.objects.get_or_create(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n defaults = dict(\n score = obj_score,\n votes = obj_votes,\n )\n )\n if not created:\n score.score = obj_score\n score.votes = obj_votes\n score.save()\n self.score = obj_score\n self.votes = obj_votes\n if commit:\n self.instance.save()", "def update(self, *args):\n qry = UpdateEntityQuery(self)\n self.context.add_query(qry)\n return self", "def update(self, table_name, fields, keys=None, any=False, eids=None):\n table = self.db.table(table_name)\n if eids is not None:\n LOGGER.debug(\"%r: update(%r, eids=%r)\" % (table_name, fields, eids))\n if isinstance(eids, list):\n return table.update(fields, eids=eids)\n else:\n return table.update(fields, eids=[eids])\n else:\n LOGGER.debug(\"%r: update(%r, keys=%r)\" % (table_name, fields, keys))\n return table.update(fields, self._getQuery(keys, any))", "def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save() or self", "def update(self, record):\n record = dict_for_mongo(record)\n id_dict = {'_id': self.record['_id']}\n self.collection.update(id_dict, {'$set': record})\n\n # Set record to the latest record from the database\n self.record = self.__class__.collection.find_one(id_dict)", "def update(example_object_id, example_object):\n # Get the example_object requested from the db into session\n update_example_object = ExampleObject.query.filter(\n ExampleObject.example_object_id == example_object_id\n ).one_or_none()\n\n # Try to find an existing example_object with the same name as the update\n field1 = example_object.get(\"field1\")\n field2 = example_object.get(\"field2\")\n\n existing_example_object = (\n ExampleObject.query.filter(ExampleObject.field1 == field1)\n .filter(ExampleObject.field2 == field2)\n .one_or_none()\n )\n\n # Are we trying to find a example_object that does not exist?\n if update_example_object is None:\n abort(\n 404,\n \"ExampleObject not found for Id: {example_object_id}\".format(example_object_id=example_object_id),\n )\n\n # Would our update create a duplicate of another example_object already existing?\n elif (\n existing_example_object is not None and existing_example_object.example_object_id != example_object_id\n ):\n abort(\n 409,\n \"ExampleObject {field1} {field2} exists already\".format(\n field1=field1, field2=field2\n ),\n )\n\n # Otherwise go ahead and update!\n else:\n\n # turn the passed in example_object into a db object\n schema = ExampleObjectSchema()\n update = schema.load(example_object, session=db.session)\n\n # Set the id to the example_object we want to update\n update.example_object_id = update_example_object.example_object_id\n\n # merge the new object into the old and commit it to the db\n db.session.merge(update)\n db.session.commit()\n\n # return updated example_object in the response\n data = schema.dump(update_example_object)\n\n return data, 200", "def test_update_using_positional_operator(self):\n\n class Comment(EmbeddedDocument):\n by = StringField()\n votes = IntField()\n\n class BlogPost(Document):\n title = StringField()\n 
comments = ListField(EmbeddedDocumentField(Comment))\n\n BlogPost.drop_collection()\n\n c1 = Comment(by=\"joe\", votes=3)\n c2 = Comment(by=\"jane\", votes=7)\n\n BlogPost(title=\"ABC\", comments=[c1, c2]).save()\n\n BlogPost.objects(comments__by=\"jane\").update(inc__comments__S__votes=1)\n\n post = BlogPost.objects.first()\n assert post.comments[1].by == \"jane\"\n assert post.comments[1].votes == 8", "def update(self, id, obj):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('put', url, data={self.singular: obj})", "def update_model(key, jdict, put=True):\n key = ndb.Key(urlsafe=key)\n model = key.get()\n if model is None:\n return False, 404\n\n if \"email\" in jdict:\n model.email = jdict[\"email\"]\n # note this operation changes the entity's key\n model.key.delete()\n model.key = ndb.Key(AddressEntry, model.email)\n\n if \"name\" in jdict:\n model.name = jdict[\"name\"]\n\n if put:\n model.put()\n return True, model", "def run_update_example():\n table = \"actors\"\n update_values = {\n 'name': \"Christopher\",\n 'last_name': \"Eccleston\"}\n update_conds = {'id': 1}\n print querify.update_from_dict(table, update_values, update_conds)", "def patch(self, request, pk):\n return self.partial_update(request, pk)", "def update(self, document_id, update_spec, namespace, timestamp):\n\n index, doc_type = self._index_and_mapping(namespace)\n document = self.BulkBuffer.get_from_sources(index,doc_type,u(document_id))\n if document:\n updated = self.apply_update(document, update_spec)\n # _id is immutable in MongoDB, so won't have changed in update\n updated['_id'] = document_id\n self.upsert(updated, namespace, timestamp)\n else:\n updated = {\"_id\": document_id}\n self.upsert(updated, namespace, timestamp, update_spec)\n # upsert() strips metadata, so only _id + fields in _source still here\n return updated", "def update_record(self, collection_name, update_record, update_condition):\n try:\n self.logger.info('in update_record()')\n collection = self.get_db()[collection_name]\n collection.update_one(update_condition, {\"$set\": update_record})\n self.logger.info('out update_record()')\n except Exception as e:\n self.logger.error(f'Error occurred while updating record {e}')", "def update_many(\n cls,\n *,\n pks: List[Union[str, int]],\n update: Dict[str, Any],\n sychronize_session: bool = False,\n ) -> None:\n if pks:\n db.session.query(cls).filter(\n getattr(cls, cls.get_primary_key()).in_(pks)\n ).update(update, synchronize_session=sychronize_session)\n db.session.commit()\n cache.delete_many(*(cls.create_cache_key(pk) for pk in pks))", "def update_object(self, instance, using=None, **kwargs):\n # Check to make sure we want to index this first.\n if self.should_update(instance, **kwargs):\n backend = self.get_backend(using)\n\n if backend is not None:\n backend.update(self, [instance])", "def update(self, collection_id, parent_id, object_id, object,\n unique_fields=None, id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n obj = Session.query(self.collection).get(object_id)\n # TODO: verify permissions\n if obj is None:\n obj = self.create(collection_id=collection_id, parent_id=parent_id,\n record=object, unique_fields=unique_fields,\n id_field=id_field, modified_field=modified_field,\n auth=None)\n else:\n for k, v in object.items():\n setattr(obj, k, v)\n return obj.deserialize()", "def update_column(conn, table, column, pk_column, pk, value) -> None:\n with conn.cursor() as curs:\n query = sql.SQL('UPDATE {} SET 
{} = %s WHERE {} = %s').format(sql.Identifier(table), sql.Identifier(column),\n sql.Identifier(pk_column))\n values = (value, pk)\n curs.execute(query, values)\n conn.commit()", "def on_put(self, req, resp, table, id):\n user = req.context['user']\n pairs = req.context['doc']['values']\n keys = pairs.keys()\n set_clause = [\"`{}`=:{}\".format(k, k) for k in keys]\n set_clause = ','.join(set_clause)\n engine = user_db_engine(user)\n query = \"UPDATE {} SET {} WHERE id=:id\".format(table, set_clause)\n try:\n pairs['id'] = int(id)\n except ValueError:\n raise exceptions.HTTPBadRequestError(\"Invalid ID\")\n\n with engine.new_session() as conn:\n result = conn.execute(query, pairs)\n\n if config.use_cache():\n key = _make_key(engine, table, \"*\", id, -1)\n cache.invalidate_query_pattern(\"{}\".format(key))\n resp.context['result'] = {'result': 'ok'}\n resp.status = falcon.HTTP_200", "def partial_update(self, request, pk=None): #partial update a specific object\n return Response({'http_method': 'PATCH'})", "def update(self, obj):\n self.identity_map[obj._instance_key] = obj\n self.register_dirty(obj)", "def record_update_for_user(record_id, values):\n session = get_session()\n with session.begin():\n record_ref = get_user_record(record_id, session=session)\n record_ref.update(values)\n record_ref.save(session=session)", "def upsert(self, ctx, data, keys = []):\n\n # TODO: Check for AutoIncrement in keys, shall not be used\n\n # If keys\n qfilter = {}\n if (len(keys) > 0):\n for key in keys:\n try:\n qfilter[key] = data[key]\n except KeyError as e:\n raise Exception(\"Could not find attribute '%s' in data when storing row data: %s\" % (key, data))\n else:\n pk = self.pk(ctx)\n qfilter[pk[\"name\"]] = data[pk[\"name\"]]\n\n # Do lookup\n if len(qfilter) > 0:\n\n row = self.lookup(ctx, qfilter)\n\n if (row):\n # Check row is identical\n for c in self.columns:\n if c[\"type\"] != \"AutoIncrement\":\n v1 = row[c['name']]\n v2 = data[c['name']]\n if c[\"type\"] == \"Date\":\n v1 = row[c['name']].strftime('%Y-%m-%d')\n v2 = data[c['name']].strftime('%Y-%m-%d')\n if (isinstance(v1, str) or isinstance(v2, str)):\n if (not isinstance(v1, str)): v1 = str(v1)\n if (not isinstance(v2, str)): v2 = str(v2)\n if (v1 != v2):\n if (c[\"name\"] not in self._lookup_changed_fields):\n logger.warn(\"%s updating an entity that exists with different attributes, overwriting (field=%s, existing_value=%s, tried_value=%s)\" % (self, c[\"name\"], v1, v2))\n #self._lookup_changed_fields.append(c[\"name\"])\n\n # Update the row\n row = self.update(ctx, data, keys)\n return row\n\n row_with_id = self.insert(ctx, data)\n return row_with_id", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def update( id=UNDEFINED, identifier=UNDEFINED, prioritya=UNDEFINED, priorityb=UNDEFINED, priorityc=UNDEFINED, priorityd=UNDEFINED, prioritye=UNDEFINED, created_at=UNDEFINED, updated_at=UNDEFINED, published_at=UNDEFINED, processed=UNDEFINED):\n errors = []\n params = dict( filter( lambda i: not isinstance( i, SV ), locals().items() ) )\n if 'id' not in params:\n return None, [(\"'id' is a required parameter to update\", None, None)]\n n, errors = note.update( **params )\n try: \n return db_to_model(n), errors\n except:\n return None, [sys.exc_info()] + errors", "def update(self, identifier, data):\n self.client.request_with_method(Methods.UPDATE % (self.name, identifier,),\n data=data)", "def model_update(self, db):\n db.session.commit()", "def update(self, data, 
on='identity'):\n ds_left = (self._meta, self._data)\n update_meta = self._meta.copy()\n update_items = ['columns@{}'.format(name) for name\n in data.columns.tolist()]\n update_meta['sets']['update'] = {'items': update_items}\n ds_right = (update_meta, data)\n merged_meta, merged_data = _hmerge(\n ds_left, ds_right, on=on, from_set='update', verbose=False)\n self._meta, self._data = merged_meta, merged_data\n del self._meta['sets']['update']\n return None", "def record_update_for_project_by_id(record_id, values):\n values['updated_at'] = datetime.datetime.utcnow()\n\n session = get_session()\n with session.begin():\n record_ref = get_project_record_by_id(record_id, session=session)\n record_ref.update(values)\n record_ref.save(session=session)\n\n return record_ref", "def update(table, id_):\n\n # your code\n\n return table", "def test_update_using_positional_operator_embedded_document(self):\n\n class Vote(EmbeddedDocument):\n score = IntField()\n\n class Comment(EmbeddedDocument):\n by = StringField()\n votes = EmbeddedDocumentField(Vote)\n\n class BlogPost(Document):\n title = StringField()\n comments = ListField(EmbeddedDocumentField(Comment))\n\n BlogPost.drop_collection()\n\n c1 = Comment(by=\"joe\", votes=Vote(score=3))\n c2 = Comment(by=\"jane\", votes=Vote(score=7))\n\n BlogPost(title=\"ABC\", comments=[c1, c2]).save()\n\n BlogPost.objects(comments__by=\"joe\").update(\n set__comments__S__votes=Vote(score=4)\n )\n\n post = BlogPost.objects.first()\n assert post.comments[0].by == \"joe\"\n assert post.comments[0].votes.score == 4", "def test_updating_record_with_dictionary_args(self, test_domain):\n identifier = uuid4()\n person = test_domain.repository_for(Person)._dao.create(\n id=identifier, first_name=\"Johnny\", last_name=\"John\", age=2\n )\n\n test_domain.repository_for(Person)._dao.update(person, {\"age\": 10})\n u_person = test_domain.repository_for(Person)._dao.get(identifier)\n assert u_person is not None\n assert u_person.age == 10", "def update_element(cls, condition=None, async=True, **kwargs):\n\n command = cls.__table__.update().values(**kwargs)\n\n if condition is not None:\n command = command.where(condition)\n\n return DBConnection.execute_command(command=command, async=async)", "def my_find_update(the_coll, search_dict, update_dict):\n x = the_coll.find(search_dict,limit=1)\n if x.count() == 0:\n the_coll.insert(update_dict)\n else:\n for x in the_coll.find(search_dict):\n x.update(update_dict)\n the_coll.save(x)", "def update(cls, collection, uid, data):\n validated = cls.validate(data)\n validated.pop(\"_id\", None) # remove field \"_id\" if set\n object_uid = cls.object_id(uid)\n collection.update_one({\"_id\": object_uid}, {\"$set\": validated}, upsert=True)\n return collection.find_one({\"_id\": object_uid})", "def test_updating_record_with_kwargs(self, test_domain):\n identifier = uuid4()\n person = test_domain.repository_for(Person)._dao.create(\n id=identifier, first_name=\"Johnny\", last_name=\"John\", age=2\n )\n\n test_domain.repository_for(Person)._dao.update(person, age=10)\n u_person = test_domain.repository_for(Person)._dao.get(identifier)\n assert u_person is not None\n assert u_person.age == 10", "def _update(self, data: Dict[str, Any], fields_to_modify: List[str]):\n\n to_set = [\n \"{0} = %({0})s\".format(f) for f in fields_to_modify\n if f in self.editable_fields\n ]\n if len(to_set) == 0:\n print('Warning: No data to set', data)\n return\n\n query = \"UPDATE {} SET {} WHERE {}\".format(\n self._NAME,\n ', '.join(to_set),\n ' AND '.join(\"{0} 
= %({0})s\".format(f) for f in self.primary_fields),\n )\n self._execute(query, data)", "def update(self, **kwargs):\n print(\"Updating model\")\n print(kwargs)\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def updateItem(self, object):\n pass", "async def update_one(\n self,\n where: t.Mapping[str, t.Any],\n data: t.Mapping[str, t.Any],\n ) -> t.Optional[t.Type[Model]]:\n\n data = await self.collection.find_one_and_update(\n filter=where,\n update={'$set': data},\n return_document=ReturnDocument.AFTER,\n )\n return self.model_class(**data) if data else None", "def test_allow_relaxed_update():\n starting_db = create_db(STARTING_DB_INPUT)\n response = o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n json.dumps({\n \"my_key\": \"I don't include a uid, but passed it in the url\"\n }))\n assert response == {\n \"uid\": \"some_uid\",\n \"my_key\": \"I don't include a uid, but passed it in the url\"\n }", "def update(self, key, val):\n if key in self._datastore:\n self._datastore[key] = val\n return True\n else:\n raise KeyError(\n \"Tried to update a non existing record\"\n )", "def salesforce_update(self, obj_name, obj_id, **kwargs):\n self.builtin.log(\n \"Updating {} {} with values {}\".format(obj_name, obj_id, kwargs)\n )\n obj_class = getattr(self.cumulusci.sf, obj_name)\n return obj_class.update(obj_id, kwargs)", "def update( d, **kw):\n d.update( d, **kw )\n return d", "def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save() or self", "def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save() or self", "def update(self, key, *columns):\n timestamp = int(time())\n return self.table.update(key, timestamp, *columns)" ]
[ "0.686397", "0.6536987", "0.6483919", "0.6443422", "0.64308685", "0.63702625", "0.6346544", "0.6217846", "0.6217339", "0.61647564", "0.611606", "0.6064789", "0.6061465", "0.6044797", "0.6041759", "0.59926885", "0.5989239", "0.59867716", "0.5947112", "0.5912526", "0.5893085", "0.5885119", "0.58620554", "0.58392495", "0.58266556", "0.5823993", "0.579499", "0.5791871", "0.5781438", "0.57491237", "0.5740912", "0.5737276", "0.5705626", "0.56978256", "0.5667782", "0.565772", "0.56561375", "0.5644177", "0.5638851", "0.56378376", "0.56259185", "0.5622954", "0.5615575", "0.5614411", "0.56117094", "0.5609064", "0.56052923", "0.5586609", "0.55857253", "0.5577383", "0.55707425", "0.556741", "0.5558957", "0.5558957", "0.5541922", "0.5541707", "0.5536608", "0.55292904", "0.55225825", "0.5513534", "0.5501776", "0.5494298", "0.5489189", "0.54890937", "0.54802257", "0.5468123", "0.54620576", "0.5459345", "0.54585856", "0.5452152", "0.54423153", "0.5437689", "0.54352283", "0.5434137", "0.543377", "0.54291904", "0.54286027", "0.5423999", "0.5423354", "0.54224247", "0.54188186", "0.5413692", "0.5413079", "0.54096574", "0.5399895", "0.5380815", "0.5376436", "0.5373417", "0.537315", "0.5366069", "0.536476", "0.5362722", "0.53574187", "0.5352586", "0.5339585", "0.5331699", "0.5326534", "0.5324324", "0.53172046", "0.53172046", "0.5313973" ]
0.0
-1
Given a query, remove all (and only) objects returned by the query.
def test_remove(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_by_query(self, query, params = {}):\n params['hitsPerPage'] = 1000\n params['attributesToRetrieve'] = ['objectID']\n\n res = self.search(query, params)\n while (res['nbHits'] != 0):\n object_ids = []\n for elt in res['hits']:\n object_ids.append(elt['objectID'])\n task = self.delete_objects(object_ids)\n self.wait_task(task['taskID'])\n res = self.search(query, params)", "def delete(self, query):\n self.collection.remove(query)", "def _clean_query(self, query):\n for object_query in query:\n filters = object_query.get(\"filters\", {}).get(\"expression\")\n self._clean_filters(filters)\n self._macro_expand_object_query(object_query)\n return query", "def remove(self, query: dict, limit: Optional[int] = 0) -> None:\n\n matches = self.find(query, limit)\n for match in matches:\n self._db[\"documents\"].remove(match)\n\n self._dump()", "def remove_from_cache(self, query):\n return", "def clear_all():\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete()", "def unset(cls, query, unset_query):\n cls.collection.update(query, {\"$unset\": unset_query}, multi=True)", "def delete_by_query(self, query):\n query = pylastica.query.Query.create(query)\n return self.request('_query', pylastica.request.Request.DELETE, query.query)", "def delete_by_query(self, **attrs):\n attrs[\"attrs\"] = [\"id\"]\n results = self.query(**attrs)\n if len(results) == 0:\n return 0\n\n results_by_type = {}\n for o in results:\n if o[\"type\"] not in results_by_type:\n results_by_type[o[\"type\"]] = []\n results_by_type[o[\"type\"]].append(o[\"id\"])\n\n return self._delete_multiple_objects(results_by_type)", "def delete_by_query(self, **attrs):\n attrs[\"attrs\"] = [\"id\"]\n results = self.query(**attrs)\n if len(results) == 0:\n return 0\n\n results_by_type = {}\n for o in results:\n if o[\"type\"] not in results_by_type:\n results_by_type[o[\"type\"]] = []\n results_by_type[o[\"type\"]].append(o[\"id\"])\n\n return self._delete_multiple_objects(results_by_type)", "def delete(self, query):\n if query.isId():\n # simple\n url = '%s/%s/%i' % (self.uri, query.table(), query._where[0].value)\n else:\n # real query\n url = '%s/%s/filter?%s' % (self.uri, query.table(), query.encode())\n data, resp = self.execute('DELETE', url, decode=True)\n return data", "def query_remove(self,*q):\n query = self.parameters['q'].difference(q)\n params = join_params(self.parameters,\n {\"q\": query, \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)", "def remove_all_recs(self):\n return self.storage.clear()", "def remove_objects(self):\n logger.debug('Removing all objects from model.')\n del self._objects[:]", "async def delete_many(self, **query):\n\n try:\n for result in await self.db.get_many(**query):\n await result.delete()\n except IntegrityError:\n raise ConflictException(\n f\"At least one {self.db_model_name} cannot be deleted since it is actively used\"\n )", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def unset_queries(self, *args):\n for k in args:\n self._query_dict.pop(k, None)", "def _purge(self):\n for _ in self.all():\n self.delete(_)", "def delete(saved_query):\n saved_query.delete()", "def drop(cls):\n objects = cls.get_all()\n if isinstance(objects, dict) is False:\n for i in cls.get_all():\n i.delete()\n return True\n else:\n return True", "def removeAllAbstractQueries(self):\n 
self.queries[\"abs\"] = []", "def remove_all():\n storage = FileStorage()\n objects = storage.all()\n objects = list(objects.values())\n\n for element in objects:\n storage.delete(element)\n objects = storage.all()", "def remove(self, id_obj=None, query_data=None):\n if id_obj:\n return self.collection.remove(id_obj, query_data)\n return self.collection.remove(query_data)", "def remove(query):\n # type: (str) -> bool\n if not query or not SEARCH_SAVED:\n return False\n searches = retrieve()\n if query in searches:\n searches.remove(query)\n save(searches)\n return True\n return False", "def query(self, query):\n\n self.query_pass(query, node=self)\n\n self.clean(node=self)", "def delete_all(self):\n raise NotImplementedError()", "def clear(self):\n \n objects = self.get_geometry()\n\n for obj in objects:\n\n rs.DeleteObject(obj) \n \n return self", "def filter_clear(client, args):\n client.context.set_query([])", "def remove_query(iden):\r\n table = query_queue_table\r\n d = table.delete(table.c.iden == iden)\r\n d.execute()", "def delete_all_bywhereclause(cls, whereclause):\n return cls.dbm().modelclass_delete_all_bywhereclause(cls, whereclause)", "def delete(self):\n expr = self.model.__table__.delete().where(self.query)\n return self._runquery(expr)", "def del_objects_by_type(self, type_):\n [o.del_object_from_parent() for o in self.get_objects_by_type(type_)]", "def _api_delete(self, query):\n if not isinstance(query, list):\n query = [query]\n\n req = list()\n for q in query:\n r = requests.delete(self._url + q, headers={'Content-Type': 'application/json',\n 'Accept': 'application/json'}, auth=self._auth,\n timeout=self._request_timeout_secs)\n r.raise_for_status()\n req.append(r)\n return req", "def delete(self):\n self._not_support_combined_queries(\"delete\")\n if self.query.is_sliced:\n raise TypeError(\"Cannot use 'limit' or 'offset' with delete().\")\n if self.query.distinct_fields:\n raise TypeError(\"Cannot call delete() after .distinct(*fields).\")\n if self._fields is not None:\n raise TypeError(\"Cannot call delete() after .values() or .values_list()\")\n\n del_query = self._chain()\n\n # The delete is actually 2 queries - one to find related objects,\n # and one to delete. 
Make sure that the discovery of related\n # objects is performed on the same database as the deletion.\n del_query._for_write = True\n\n # Disable non-supported fields.\n del_query.query.select_for_update = False\n del_query.query.select_related = False\n del_query.query.clear_ordering(force=True)\n\n collector = Collector(using=del_query.db, origin=self)\n collector.collect(del_query)\n deleted, _rows_count = collector.delete()\n\n # Clear the result cache, in case this QuerySet gets reused.\n self._result_cache = None\n return deleted, _rows_count", "def remove(self):\n for db in self.values():\n db.remove()", "def prune(cls):\n keep_ids = cls.objects.distinct(\"channel_id\", \"action\").order_by(\"channel_id\", \"action\", \"-performed\").values_list(\"id\", flat=True)\n cls.objects.exclude(id__in=keep_ids).delete()", "def __delitem__(self, query_filter):\n subquery_count = len(self.__bound_queries)\n keyerror_count = 0\n saved_items = []\n for index, query in enumerate(self.__bound_queries):\n try:\n saved_items.append(query.get(query_filter, None))\n del query[query_filter]\n except KeyError:\n keyerror_count += 1\n except:\n for q, old_value in itertools.izip(self.__bound_queries[:index],\n saved_items):\n if old_value is not None:\n q[query_filter] = old_value\n raise\n\n if keyerror_count == subquery_count:\n raise KeyError(query_filter)", "def clear(self):\n for vertex in self.vertices():\n del self[vertex]", "def removeall(table):\n doall(\"DELETE FROM {table}\".format(table=table))", "def _extract_deletes(self, query) :\n\t\tsparql = self.n.sparql\n\t\t\n\t\t# because the loop below alter's the contents of each insert\n\t\tquery = copy.copy(query)\n\t\t\n\t\t# grab the insert list\n\t\tdeletes = query[sparql.delete]\n\t\t\n\t\tnew_deletes = []\n\t\tfor delete in deletes :\n\t\t\tif sparql.delete in delete :\n\t\t\t\tvar = delete[sparql.subject]\n\t\t\t\tpredicate = delete[sparql.predicate]\n\t\t\t\t\n\t\t\t\tdel delete[sparql.subject]\n\t\t\t\tdel delete[sparql.predicate]\n\t\t\t\t\n\t\t\t\tif predicate is None :\n\t\t\t\t\tnew_deletes.append(delete)\n\t\t\t\telse :\n\t\t\t\t\tnew_deletes.append({\n\t\t\t\t\t\tsparql.var : var,\n\t\t\t\t\t\tpredicate : delete,\n\t\t\t\t\t})\n\t\treturn new_deletes", "def remove_all_objs(self):\n objs = self.scene.get_objects()\n objs_attached = self.scene.get_attached_objects()\n # remove add objects\n for key in objs.keys():\n self.remove_obj(key)\n # remove attached objects\n for key in objs_attached.keys():\n self.unlink_obj(objs_attached[key].link_name, key)", "def clear_relations(cls, row_id):\n obj = cls.query.filter_by(id=row_id).first()\n\n #obj.movies.clear()\n #return commit(obj)\n\n if cls.__name__ == 'Actor':\n obj.movies.clear()\n elif cls.__name__ == 'Movie':\n obj.actors.clear()\n return commit(obj)", "def deleteall(cls, transaction):\n return Delete(\n From=cls.table,\n Where=None,\n ).on(transaction)", "def removeAllTitleQueries(self):\n self.queries[\"ti\"] = []", "def clear(self):\n self.logger.log_clear(list(self.json_collection.find()))\n self.json_collection.remove()", "def remove_all(self):\n if self._processed:\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n res, data = self._mailconn.store(msg.decode('utf-8'), '+FLAGS', '\\\\Deleted')\n print(res)", "def remove_all():\n \"\"\" Removes all from the database \"\"\"\n redis_store.flushall()", "def purge(self):\n keys = [k for (k, v) in self.get_range()]\n\n [self.remove(k) for k in keys]", "def clear(self) -> 
None:\n self.objects = []", "def _get_objects(self, object_query):\n object_name = object_query[\"object_name\"]\n expression = object_query.get(\"filters\", {}).get(\"expression\")\n\n if expression is None:\n return set()\n object_class = self.object_map[object_name]\n\n query = object_class.query\n filter_expression = self._build_expression(\n expression,\n object_class,\n object_query.get('fields', []),\n )\n if filter_expression is not None:\n query = query.filter(filter_expression)\n requested_permissions = object_query.get(\"permissions\", \"read\")\n if requested_permissions == \"update\":\n objs = [o for o in query if permissions.is_allowed_update_for(o)]\n else:\n objs = [o for o in query if permissions.is_allowed_read_for(o)]\n\n return objs", "def delete_all(self):\n query = \"\"\"MATCH(n) DETACH DELETE n\"\"\"\n return self.create_tx(query)", "def clear(self):\n for project in Project.objects:\n project.delete()", "def del_all_records():\n delete_alles = Customer.delete().where(Customer.name >= '')\n delete_alles.execute()", "def remove_all(self, *items):\n for item in items:\n self.remove(item)", "def cleanup(self):\n deletes = []\n for item in self._collect.find({'status': 'started'}, {'_id': True}):\n deletes.append(pymongo.DeleteOne(item))\n # Remove them\n if len(deletes):\n print(\"Delete\", self._collect.bulk_write(deletes).deleted_count)", "def clear_all(self):\n raise NotImplementedError", "def delete_many(collection: Collection, query):\n return collection.delete_many(query).deleted_count", "def delete(self):\n self._objects.remove(self._objects[0])\n return self._objects", "def delete_all():\n answer = ['YES', 'NO']\n str = rs.GetString(\"Delete all objects?\", 'YES', answer)\n\n if str == 'YES':\n obs = rs.ObjectsByType(0)\n rs.DeleteObjects(obs)\n elif str == 'NO':\n pass\n else:\n sys.exit()", "def __iter__(self, *args, **kwargs):\n iterator = self.original_query.__iter__(*args, **kwargs)\n self._delete_hosts()\n return iterator", "def clear(*objects_to_clear):\n if not hasattr(_thread_local_data, 'current_space'):\n return\n\n space = current_space()\n for obj in objects_to_clear:\n space.clear(obj)", "def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)", "def deleteAll(self):\n self.deleteAttributeRange() #Default args = everything", "def RemoveAll(*args, **kwargs):\n return _gdi_.PseudoDC_RemoveAll(*args, **kwargs)", "def remove():", "def pop_all(self):\n if self.items:\n self.pop()\n return self.pop_all()", "def clear_slow_queries(self):\n request = Request(method=\"delete\", endpoint=\"/query/slow\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryClearError(resp, request)\n return True\n\n return self._execute(request, response_handler)", "def delete_all(self, Model):\n ndb.delete_multi(\n Model.query().fetch(keys_only=True)\n )", "def clearpredicates(self):\n self._preds = []", "def deleteAll(self):\n self.db.execute(\"DELETE FROM MATCH;\", ())", "def union_all(self, query):\n return self.union(query, True)", "def delete_all(self):\n self.session.query(TodoItem).delete()\n self.session.query(TodoList).delete()", "def clear_results(self):\n for res in self.results:\n self.results[res] = None", "def clear(self, clearall=False):\n def closure(cur):\n cur.execute(\"\"\"\n DELETE FROM events\n \"\"\")\n if clearall:\n cur.execute(\"\"\"\n DELETE FROM slides\n \"\"\")\n cur.execute(\"\"\"\n DELETE FROM rooms\n \"\"\")\n self.execute(closure)", "def pull(self, query, reload=True):\n qs = 
self._get_queryset()\n qs.update_one({'$pull': {self.__field_name__: query}})\n\n self.__log__.append(SetPull(query=query))\n\n if reload:\n self.reload()", "def removeAbstractQuery(self, abstractQuery):\n try:\n self.queries[\"abs\"].remove(abstractQuery)\n except ValueError:\n raise NotInQueryException", "def remove_all(self, item):\n # type: (Any) -> None\n item = self.ref(item)\n while list.__contains__(self, item):\n list.remove(self, item)", "def _reset_query(self):\n self.query = pysnow.QueryBuilder()\n self.desired_response_fields = list()", "def remove_duplicates(self, qs, m2m_type='column_raw'):\n ColumnMapping.objects.filter(\n **{\n '{0}__in'.format(m2m_type): qs,\n 'super_organization': self.super_organization\n }\n ).exclude(pk=self.pk).delete()", "def removeAll(self, *args):\n pass", "def removeAllAuthorQueries(self):\n self.queries[\"au\"] = []", "def remove_all(self):\n self._options.clear()\n self._programs.clear()", "def clear(self):\n for key in self.keys():\n del self[key]", "def remove(predicate, coll):\r\n return filter(lambda x: not predicate(x), coll)", "def removeAll(self):\n self.pDict.clear()", "def remove(self, obj):\n self._pkcache.pop(obj.pk, None)\n for ctype in obj._content_types:\n if obj.pk in self._typecache[ctype]:\n self._typecache[ctype].pop(obj.pk, None)", "async def clear_all(self) -> None:", "def removeall(subdomain):\n\tTarget.query.filter(Target.subdomain.like(f\"%{subdomain}%\")).delete(synchronize_session='fetch')\n\tdb.session.commit()\n\tprint(\"deleted\",sub)", "def remove(predicate, coll):\n return filter(lambda x: not predicate(x), coll)", "def DeleteAllItems(self):\r\n\r\n self.DeleteRoot()", "def delete(self, query, callback=None, query_args=None):\r\n data = self.db.execute(query, query_args)\r\n return data", "def delete(self):\n for obj in self:\n _unset_related_objects_relations(obj)\n\n self.update(deleted=now())", "def remove(self):", "def clear(self):\r\n # Remove all Currency from the handler's parent object.\r\n for currency in self.all:\r\n self.remove(currency)", "def cleanup(self):\n for key in list(self.__dict__.keys()):\n delattr(self, key)", "def test_deletebyquery_body_api(self):\n query_body = { \"term\": {\"name\": \"joe\"}}\n result = self.es.delete_by_query(query_body=query_body,\n indexes=['contacts_esclient_test'],\n doctypes=['person'])\n self.assertTrue(result['ok'])\n self.assertTrue(self.es.refresh('contacts_esclient_test'))\n result = self.es.get('contacts_esclient_test', 'person', 1)\n self.assertFalse(result['found'])\n result = self.es.get('contacts_esclient_test', 'person', 1)\n self.assertFalse(result['found'])", "def clean(self, destroy=True):\n\t\twhile self.clean_one(destroy=destroy):\n\t\t\tpass", "def clear(cls, resq):\n first = MultipleBackend.classes[0]\n return first.clear(resq)", "def clear(self):\n self.results.clear()", "def delete_named_query(NamedQueryId=None):\n pass" ]
[ "0.7295163", "0.71565104", "0.6959021", "0.6853164", "0.660744", "0.6603074", "0.6566388", "0.6547425", "0.65339786", "0.65339786", "0.6423338", "0.64070696", "0.63401437", "0.6265542", "0.6230876", "0.61956275", "0.61956275", "0.6149756", "0.6123274", "0.6103659", "0.60984194", "0.60613704", "0.6006218", "0.59836346", "0.59254295", "0.5851449", "0.58506", "0.5848191", "0.5780722", "0.57518995", "0.574111", "0.57201266", "0.57039696", "0.56950736", "0.5655858", "0.56523716", "0.5630662", "0.5622686", "0.56082666", "0.56050766", "0.55903757", "0.55743414", "0.5570839", "0.55695915", "0.5557812", "0.5557556", "0.5545088", "0.5541262", "0.55394137", "0.55373824", "0.55261374", "0.5521254", "0.5520701", "0.55191123", "0.5518644", "0.55136234", "0.5507464", "0.5502294", "0.54983175", "0.54917073", "0.5490709", "0.5489156", "0.54890114", "0.54886854", "0.54758704", "0.5456009", "0.5449124", "0.5447832", "0.5445725", "0.54317874", "0.5424959", "0.5412082", "0.5398772", "0.5397606", "0.53931236", "0.53903234", "0.5384649", "0.5383467", "0.53729665", "0.53629446", "0.53615206", "0.53591526", "0.53557616", "0.5344846", "0.5341385", "0.532278", "0.5316552", "0.5316198", "0.5315129", "0.5314529", "0.53136736", "0.5307852", "0.53006357", "0.5296036", "0.5295521", "0.5290205", "0.52875674", "0.5278898", "0.5277037", "0.5273492", "0.5269906" ]
0.0
-1
Given a primary key, remove the referenced object.
def test_remove_one(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeidfobject(self, idfobject):\n key = idfobject.key.upper()\n self.idfobjects[key].remove(idfobject)\n self._reset_dependant_vars(\"idfobjects\")", "def del_object_from_parent(self):\n if self.parent:\n self.parent.objects.pop(self.ref)", "def remove(self, _id):\n if self.objects.get(_id):\n self.objects.pop(_id)", "def remove_object(cls, object_to_be_removed):\n cls.query.filter_by(x=object_to_be_removed.x,\n y=object_to_be_removed.y).delete()", "def remove(obj_objectid_or_path_tuple):", "def __del__(self):\n # Only an integer is passed to the call\n self.ph.remove(self.ID)\n # No new references were created, nothing retained", "def remove_object(self, object_to_be_removed):\n Map.remove_object(object_to_be_removed)\n object_to_be_removed.query.delete()", "def delete_refobj(self, refobj):\n with common.locknode(refobj, lock=False):\n cmds.delete(refobj)", "def obj_delete(self, request=None, **kwargs):\n self.get_collection(request).remove({ \"_id\": ObjectId(kwargs.get(\"pk\")) })", "def delete(self):\n self.id = uuid4()\n DataStore.remove_instance(self)", "def remove_object(self, obj):\n pass", "def remove(self, key: int) -> None:\n pos = self.hash(key)\n\n if key in self.table[pos]:\n del self.table[pos][key]", "def delete(self):\n result = self.where({self.__class__.get_primary():self.primary})\n return result.delete()[0]", "def remove(self, key: int | str):\n self.__delitem__(key)", "def delete_record(self, key):\n del self._records[key]", "def remove_object(self, obj: str):\n if obj in self._objects:\n self._objects.remove(obj)\n else:\n raise IDDoesNotExist", "def delete(self, key):\n try: \n self.pop(key)\n \n except KeyError: \n raise KeyError", "def destroy(self):\n if self._in_db:\n if self._id is None:\n raise PrimaryKeyValueNotFound #! 
need primarykey to track this instance\n return type(self).at(self._id).delete().execute()\n return None", "def delete(self, key: str) -> None:\n self.db.rem(key)\n self.db.dump()", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n self.delete(key)", "def _remove(self, job_id):\n self.connection._lrem(self.key, 1, job_id)", "def remove(self):\r\n\t\tself._delete()", "def remove(self, key):\n return self._db.hdel(self.index, key)", "def __delitem__(self, key):\n self.f_remove(key)", "def remove(self, obj):\n self._pkcache.pop(obj.pk, None)\n for ctype in obj._content_types:\n if obj.pk in self._typecache[ctype]:\n self._typecache[ctype].pop(obj.pk, None)", "def delete(self, obj=None):\n if not obj:\n return\n key = \"{}.{}\".format(type(obj).__name__, obj.id)\n if key in self.__objects:\n del self.__objects[key]\n self.save()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self, key):\n del self[key]", "def delete_object(self, object):\n object.delete()", "def _delete(self, key):\n path = self._get_key_path(key)\n remove(path)", "def removeObject(self, objectID):\n del self.objects[objectID]\n del self.lost[objectID]", "def __delitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n q = q.filter(PAW2_DBObject.key == key)\n assert q.delete(synchronize_session=False) == 1\n session.commit()", "def remove(self, object):\n if not hasattr(object, 'id') or not object.id:\n raise ValueError(\"The model must have an ID\")\n if str(object._meta.object_name) != self.object_type:\n raise ValueError(\"Model type don't match\")\n already_objects = self.objects_id.split(';')\n if str(object.id) in already_objects:\n already_objects.remove(str(object.id))\n self.objects_id = self._convertListToString(already_objects)\n self.save()", "def remove(self, document):\n return self.db.pop(document['id'], None)", "def remove(self):\n db.session.delete(self)\n db.session.commit()", "def remove(self, key):\n elem = self.find(key)\n if not elem:\n return\n self.remove_elem(elem)", "def delete(self, key):\n if key not in self.db:\n raise LookupError(\"No record for key \\\"%s\\\" exists.\" % key)\n\n record = self.db[key]\n del self.db[key]\n return record", "def remove_key(self, key, save_pkl=True):\r\n self.keys.remove(key)\r\n if save_pkl:\r\n self.save_pkl()", "def remove(self):\n with managed_session() as session:\n session.delete(self)", "def remove(table, id_):\n\n record = common.find_id(table, id_[0])\n if record in table:\n table = common.remove_record(table, record)\n\n return table", "def remove(self, ID):\n if ID in self.pDict:\n del self.pDict[ID]", "def __delitem__(self, key):\n self.deleteAttributes([key])", "def delete_bykey(cls, keydict):\n cls.dbm().modelclass_deletebykey(cls, keydict)", "def remove(self, key):\n try:\n del self._store[key]\n except KeyError:\n pass", "def remove_key(self, key):\n del self.data[key]\n self.save_data()", "def remove(self, job_or_id):\n job_id = job_or_id.id if isinstance(job_or_id, Job) else 
job_or_id\n self.connection.lrem(self.key, 0, job_id)\n return defer.succeed(job_or_id)", "def remove(self, key):", "def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)", "def __delitem__(self, key: T) -> None:\n self.delete(key)", "def delete(self, key):", "def delete(self, obj=None):\n if obj is not None:\n key = \"{}.{}\".format(type(obj).__name__, obj.id)\n try:\n del self.__objects[key]\n except KeyError:\n pass", "def delete_by_id(self, pkId: int):\n if not self.model:\n raise NameError('database model has not been set.')\n if not pkId:\n raise ValueError('invalid primary key value.')\n\n with self.session() as session:\n query = self.get_query(session)\n query.filter(self.model.pkId == pkId).delete()", "def remove(self, key: int) -> None:\n chain, idx = self._search(key)\n if idx is not None:\n chain.pop(idx)", "def remove_row(self, row_id):", "def remove(self, key: int) -> None:\n pos = key % self.space\n head = self.hash_table[pos]\n curr = head\n\n while curr.next:\n if curr.next.key == key:\n curr.next = curr.next.next\n return\n curr = curr.next", "def remove(self, key):\n old_list = self._elements\n ElementList.remove(self, key)\n self._check_primary(old_list)\n return self", "def remove(self, key):\n pass", "def delete(self, key):\n return None", "def delete(self, key):\n self.tree.delete(key)", "def delete(self, object_id):\n libplasma.delete(self.conn, object_id)", "def delete(self, key):\n pass", "def delete(self, key):\n pass", "def delete_item(self, table_name: str, primary_key: dict):\n table = self.resource.Table(table_name)\n table.delete_item(Key=primary_key)", "def remove(self, key):\n \n # If there used to be a key, there must exist an old value blob somewhere in the database. It should be deallocated after a successful commit to disk.\n if key in self.keys:\n if self.keys[key] is not None:\n punchat,punchlen = self.keys[key]\n self.awaitingpunch.append((punchat, punchlen))\n\n self.keys.pop(key, None)\n self.buffered.pop(key, None)\n self.cache.pop(key, None)\n\n if self.autocommit:\n commit()", "def remove_by_id(self,nodeid,verbose=False):\n self.remove(self[nodeid],verbose=verbose)", "def __delitem__(self, key):\n\n if key not in self:\n raise KeyError(key)\n\n if self.is_view:\n self._view.remove(key)\n\n # resolve orphan data pointers\n # TODO: this may be a performance bottle neck in large graphs\n for target_key, target_value in self._storage.items():\n if target_value.get(self._data_pointer_key) == key:\n\n self._storage[target_key].update(self._storage[key])\n del self._storage[target_key][self._data_pointer_key]\n\n del self._storage[key]", "def remove(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n a[h] = None", "def __delitem__(self, key):\n bucket = self._buckets[self._index(key)]\n for node in bucket.linked_list:\n bucket_object_key, bucket_object_value = node.value\n if bucket_object_key.load_value() == key:\n # remove objects from object -> list_node dict\n key_list_node = self._object_to_list_node.pop(bucket_object_key)\n value_list_node = self._object_to_list_node.pop(bucket_object_value)\n # remove list_node from in_memory and disk objects\n self._in_memory_objects.remove(key_list_node)\n self._in_memory_objects.remove(value_list_node)\n self._disk_objects.remove(key_list_node)\n self._disk_objects.remove(value_list_node)\n # remove node from bucket linked list\n assert bucket.linked_list.remove(node) == True\n self._balance()\n return\n raise KeyError(\"Key `{}` is not exists\".format(key))", "def 
delete(self):\n if self.iid is not None:\n self.db().remove(self.iid)", "def __delitem__(self, key):\n\n del self._vertices[key]", "def remove(self, id_obj=None, query_data=None):\n if id_obj:\n return self.collection.remove(id_obj, query_data)\n return self.collection.remove(query_data)", "def delete(self, key):\n self.map.pop(key, None)", "def remove(self, key):\n match = self.find(key)\n if not match:\n raise UserDBValueError(\"Element not found in list\")\n\n self._elements = [this for this in self._elements if this != match]\n return self", "def remove(self, key: int) -> None:\n if key in self.keys:\n idx = self.keys.index(key)\n self.keys.pop(idx)\n self.values.pop(idx)", "def delete(self) -> None:\n self.pop()", "def delete(self,key):\n self._lock.acquire()\n del self._db[key]\n self._lock.release()\n logging.debug(\"removed key %s from db\", key)", "def remove(self, *args):\n self.__execute(self.pkgin_bin, \"remove\", *args)", "def _delete_key(self):\n return self.connection.hdel(self.key, self.name)", "def delete(self, _id):", "def delete(self, logical_key):\n path = self._split_key(logical_key)\n pkg = self[path[:-1]]\n del pkg._children[path[-1]]\n return self", "def __delitem__(self,key):\n self.table.delItem(key,self.column)", "def delete(cls, row_id):\n # obj = db.session.query(cls).filter(cls.id == row_id).delete()\n obj = db.session.query(cls).filter_by(id=row_id).delete()\n db.session.commit()\n return obj", "def delete_obj(obj):\n Session.delete(obj)\n Session.flush()\n Session.commit()", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def __delitem__(self, key):\n del self._get_storage()[key]", "def __delitem__(self, key):\n del self.elements[key]", "def remove(self, data_id, idx):\n temp = self.database[data_id]\n del temp[idx]\n self.database[data_id] = temp" ]
[ "0.7022169", "0.69352937", "0.67359215", "0.6719173", "0.6582321", "0.65806663", "0.6567136", "0.6532157", "0.6484623", "0.64809275", "0.64600873", "0.64578646", "0.6424982", "0.64086294", "0.639342", "0.63872486", "0.63722575", "0.6369881", "0.63674027", "0.6356457", "0.6356457", "0.6356318", "0.6355035", "0.6342222", "0.63272065", "0.6319635", "0.63106555", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296505", "0.6296407", "0.62961686", "0.62950283", "0.62878644", "0.62851226", "0.62779546", "0.62771404", "0.6249959", "0.6247602", "0.6244628", "0.6241307", "0.6232343", "0.62157774", "0.62076247", "0.61956227", "0.6193746", "0.61921", "0.618704", "0.6186566", "0.61823976", "0.617984", "0.61781764", "0.6171365", "0.6166635", "0.61636364", "0.6160627", "0.615305", "0.61440563", "0.61245483", "0.6123099", "0.61209434", "0.6117461", "0.6117187", "0.6112899", "0.6112899", "0.6105527", "0.61048925", "0.61033666", "0.60982084", "0.609218", "0.6083661", "0.6081287", "0.60809875", "0.6080425", "0.60794526", "0.60783154", "0.60732883", "0.60712034", "0.6069552", "0.6060095", "0.6057046", "0.6048295", "0.6042892", "0.6040158", "0.6039789", "0.6039786", "0.6034942", "0.603349", "0.60223514", "0.60211116" ]
0.0
-1
Executes the status change.
def execute(self, agent: Agent, state: SimState) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateStatus(self, status):\n pass", "def change_status(self, status, application_id):", "def StatusChanged(self, state, info):\n pass", "def change_status(self):\n if self.status == 'in progress':\n self.status = 'done'\n return self.status\n elif self.status == 'done':\n self.status = 'in progress'\n self.eisenhower_priority()\n return self.status", "def refresh_status() -> None:\n ...", "def UpdateStatus(self, status):\r\n self.status.update(status)", "def _update_status(self, new_status):\r\n old_status = self._status\r\n self._status = new_status\r\n for listener in self._listeners:\r\n # Calling user-defined callback.\r\n self._thread_pool.submit(\r\n listener.on_status_change(\r\n self, new_status.value, old_status.value))", "def refresh_status(self):\n\n pass", "def _update_status(self):\n self._db_update({'status': self.status})", "def statusupdate(filepath):\n pass", "def _updateStatus(self, result):\n\n if result.status is not None:\n # status was explicitly set\n self.target.localStatus = result.status\n if self.target.present and self.target.created is None:\n self.target.created = self.configSpec.operation not in [\n \"check\",\n \"discover\",\n ]\n elif not result.success:\n # if any task failed and (maybe) modified, target.status will be set to error or unknown\n if result.modified:\n self.target.localStatus = (\n Status.error if self.required else Status.degraded\n )\n elif result.modified is None:\n self.target.localStatus = Status.unknown\n # otherwise doesn't modify target status", "def _set_status(self, action, status):\n raise NotImplementedError(\"Base class: cannot be called directly\")", "def status():\n pass", "def status(self):\n self.scion_sh('status')", "def status(self) -> NoReturn:\n\n curr_status= self.percent_done()\n while(curr_status < 100):\n\n update_status(name=self.name, status=curr_status)\n time.sleep(0.5)\n\n curr_status = self.percent_done()\n\n update_status(name=self.name, status=curr_status)", "def _update_status(self, *args, **kwargs):\n # Get Future\n future = self.future\n\n # Do nothing if no Future\n if not future:\n return\n\n # Get the status\n dask_status = future.status.lower()\n\n try:\n # Translate to TethysJob status\n self._status = self.DASK_TO_STATUS_TYPES[dask_status]\n self.save()\n # Clean up client\n self.client.close()\n\n except KeyError:\n log.error('Unknown Dask Status: \"{}\"'.format(dask_status))", "def on_status_update(self, data):\n # TODO: Update User/Client object with this info\n print ('Status Update: %s' % data)", "def _process(self):\n self.kwargs[\"collect\"].change_status(self.kwargs[\"collect\"].FINALIZED)", "def updatestatus(self):\n self.status = self.query()\n if self.status['success']:\n return True\n else:\n return False", "async def change_status(self, status: str) -> int:\n data = {'status': str(status)}\n r = await self.request.request(url='https://www.roblox.com/home/updatestatus', method='POST', data=j.dumps(data))\n return r.status_code", "def update_status(self, callback_function_param=False):\n self.send_message(\n {MESSAGE_TYPE: TYPE_GET_STATUS}, callback_function=callback_function_param\n )", "def status(self):", "def status(self, status: dict):\n pass", "def change_status(self):\n message = self.state_frame[0]\n self.on_status_update(message)\n self.state = STATE_READ_LINE", "def _on_status_change(\n self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict\n ) -> None:\n if self.enabled and new == self.app.States.clean.value:\n self._start_notification_cycle()\n 
elif old == self.app.States.clean.value:\n self._cancel_notification_cycle()", "def update(self, **kwargs):\n self.status = status.parse(status.get(host=self._host, port=self._port))", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def set_status(self, status):\n if not status == self._status:\n self._status = status\n self.winstance.send_event('State changed to ' + self._status)\n\n self.completed = not self.parent_node.is_job or \\\n self._status == 'COMPLETED'\n\n if self.completed:\n self.publish()\n\n if not self.parent_node.is_job:\n self.failed = False\n else:\n self.failed = self.parent_node.is_job and \\\n (self._status == 'BOOT_FAIL' or\n self._status == 'CANCELLED' or\n self._status == 'FAILED' or\n self._status == 'REVOKED' or\n self._status == 'TIMEOUT')", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def SetStatus(self, status):\r\n self.status = status", "def execute(self) -> None:\n self.state()", "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)", "def update_status(self) -> None:\n try:\n (rc, mid) = self.mqttc.publish(\n self.config.status_topic, json.dumps(self.status), qos=0, retain=False\n )\n if rc == mqtt.MQTT_ERR_SUCCESS:\n logging.info(\n f\"The request for a status update has been successfully accepted: mid={mid}\"\n )\n else:\n logging.warning(\"The request for a status update has been rejected\")\n except ValueError as e:\n logging.warning(f\"Cannot send status update: {e}\")", "def wait_for_status(self, status):\n code = self.instance.state['Code']\n while code != status:\n time.sleep(3)\n self.instance.reload()\n code = self.instance.state['Code']", "def processInterfaceStatusUpdate(iTag, status): #@NoSelf", "def setstatus(self, status):\n with self.lock:\n self.status = status", "def status_change(check_id, output, sleep_time=1):\n\n output_list = output[0].split()\n this_check = Check.objects.get(id=check_id)\n\n if b'OK' in output_list[1]:\n # Set check status to OK\n this_check.status = 'OK'\n\n elif b'WARNING' in output_list[1]:\n # Set check status to WARNING\n this_check.status = 'WARNING'\n else:\n # Set check status to Fail\n this_check.status = 'FAIL'\n\n # Set check output/last_run data\n this_check.output = output[0].decode('utf-8')\n this_check.last_run = timezone.now()\n this_check.save()\n this_check.update_service_status()\n\n # Update check RRD\n check_rrd.update(this_check.id, this_check.output)\n\n time.sleep(sleep_time)", "async def status(self, context):\n await self.send_message(context, await self.status_msg_packed(context))", "def change_status(self, inf, status):\n self.interfaces[inf]['status'] = status", "def on_algorithm_status_change(self, data):\n status = data.get('status')\n run_id = data.get('run_id')\n if has_task_failed(status):\n # TODO handle run sequence at this node. 
Maybe terminate all\n # containers with the same run_id?\n self.log.critical(\n f\"A container on a node within your collaboration part of \"\n f\"run_id={run_id} has exited with status '{status}'\"\n )\n # else: no need to do anything when a task has started/finished/... on\n # another node", "def status(self, **options):\n pass", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def manual_update(self):\n for cb, status in zip(self._manual_cbs, self.status_objs):\n cb(status)", "def _set_status(self, action, status):\n cmd = \"curl http://{}:{}@{}/{}s.cgi?led={}\".format(self.config['username'],\n self.config['password'],\n self.config['host'],\n action,\n status)\n self.log.info(\"PDU cmd: {}\".format(cmd))\n utils.start_standing_subprocess(cmd)\n time.sleep(10)", "def set_status(self, scenario_id, status):\n self.cur.execute(\n \"UPDATE execute_list SET status = %s WHERE id = %s\",\n (status, scenario_id),\n )", "async def async_update(self):\n\n await self.status_request()", "async def status(self, ctx, *, status_type: str.lower):\n if status_type == \"clear\":\n self.bot.config.remove(\"status\")\n await self.bot.config.update()\n await self.set_presence()\n embed = Embed(title=\"Status Removed\", color=self.bot.main_color)\n return await ctx.send(embed=embed)\n status_type = status_type.replace(\" \", \"_\")\n\n status, msg = (\n await self.set_presence(status_identifier=status_type, status_by_key=True)\n )[\"status\"]\n if status is None:\n raise commands.MissingRequiredArgument(SimpleNamespace(name=\"status\"))\n\n self.bot.config[\"status\"] = status.value\n await self.bot.config.update()\n\n embed = Embed(\n title=\"Status Changed\", description=msg, color=self.bot.main_color\n )\n return await ctx.send(embed=embed)", "def set_status( code ):", "def status(self, id):", "def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")", "def unit_state_change_cb (unit, state) :\n\n print \"[Callback]: ComputeUnit '%s' state: %s.\" % (unit.uid, state)\n\n if state == rp.FAILED :\n sys.exit (1)", "def sync_status_to_vc(status, context):\n conn = self._vc_connection\n conn.vip.health.set_status(status, context)", "def do_status(self, *arg):\n if self.pocs is None:\n print_warning('Please run `setup_pocs` before trying to run')\n return\n if self.msg_subscriber is None:\n self.do_start_messaging()\n status = self.pocs.status()\n print()\n pprint(status)\n print()", "def _set_status(self, status):\n with self.status_lock:\n if (status in _ENDING_STATUSES) or (not self.status in _ENDING_STATUSES):\n self.status = status", "def change_status(self):\n if self.status == \"Still Loaned\":\n self.status = \"Given Back\"\n else:\n self.status = \"Still Loaned\"", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, 
status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, *args):\n return self.cmd('status', *args)", "def update(self, new_status: Status) -> None:\n self._status = new_status", "def update_status(self, context, status):\n plugin = self.driver.service_plugin\n plugin.update_status_by_agent(\n context, status, self.driver.service_type)", "def __status(self, *args):\n return \"status\"", "def t_status_process(self, *args, **kwargs):\n\n self.dp.qprint(\"In status process...\")\n\n d_state = self.job_state(*args, **kwargs)\n\n d_ret = d_state['d_ret']\n b_status = d_state['status']\n\n l_keys = d_ret.items()\n l_status = []\n for i in range(0, int(len(l_keys)/2)):\n b_startEvent = d_ret['%s.start' % str(i)]['startTrigger'][0]\n try:\n endcode = d_ret['%s.end' % str(i)]['returncode'][0]\n except:\n endcode = None\n\n if endcode == None and b_startEvent:\n l_status.append('started')\n if not endcode and b_startEvent and type(endcode) is int:\n l_status.append('finishedSuccessfully')\n if endcode and b_startEvent:\n l_status.append('finishedWithError')\n\n self.dp.qprint('b_startEvent = %d' % b_startEvent)\n self.dp.qprint(endcode)\n self.dp.qprint('l_status = %s' % l_status)\n\n d_ret['l_status'] = l_status\n return {\"d_ret\": d_ret,\n \"status\": b_status}", "def status(self, value):\n if self._status != value:\n self._status = value\n self._last_changed = now()\n self.status_events.notify(self.status_feedback)\n return self._status", "def update_status(request_id, status):\n pass", "def status(self):\n pass", "def status(self):\n pass", "def update(self):\n _LOGGER.debug(\"Updating status using the client AC instance...\")\n self.ac.update_status()\n _LOGGER.debug(\"Status updated using the client AC instance\")", "def on_status(update, context):\n current_state = context.user_data[\"state\"]\n current_request = context.user_data.get(\"current_request\", None)\n message = f\"State: {current_state}\\nRequest: {current_request}\"\n\n context.bot.send_message(chat_id=update.message.chat_id, text=message)", "def update_status(self):\n\n # first get the instances we need to check\n monitor_jobs = {}\n for _, job_node in self.get_executions_iterator():\n if job_node.is_job:\n for job_instance in job_node.instances:\n if not job_instance.simulate:\n if job_instance.host in monitor_jobs:\n monitor_jobs[job_instance.host]['names'].append(\n job_instance.name)\n else:\n monitor_jobs[job_instance.host] = {\n 'config': job_instance.monitor_config,\n 'type': job_instance.monitor_type,\n 'workdir': job_instance.workdir,\n 'names': [job_instance.name],\n 'period': job_instance.monitor_period\n }\n else:\n job_instance.set_status('COMPLETED')\n\n # nothing to do if we don't have nothing to monitor\n if not monitor_jobs:\n return\n\n # then look for the status of the instances through its name\n states = 
self.jobs_requester.request(monitor_jobs, self.logger)\n\n # finally set job status\n for inst_name, state in states.iteritems():\n self.job_instances_map[inst_name].set_status(state)\n\n # We wait to slow down the loop\n sys.stdout.flush() # necessary to output work properly with sleep\n time.sleep(LOOP_PERIOD)", "def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return", "def _update_status(self, message):\n message = \"[{}] {}\".format(strftime(\"%H:%M:%S\", localtime()), message)\n self.tracker.write_log(message)\n self.ui.update_status(message)", "def _status_btn_clicked(root, item):\n sql_status_update = 'UPDATE job SET Job_Status = \"Complete\" WHERE Job_ID = '+str(item[0])+';'\n print (sql_status_update)\n conn = pymysql.connect(host='localhost', user='root', password='#######', db='######')\n a = conn.cursor()\n a.execute(sql_status_update)\n conn.commit()\n a.close()\n conn.close()", "def updateStatus(self, newStatus = False):\n\t\theight, width = self.screen.getmaxyx()\n\t\tif newStatus:\n\t\t\tself.status = str(newStatus)\n\t\tspaces = width - len(self.status) - 2\n\t\tself.wts(height - 1, 1, self.status + ' ' * spaces , 1)\n\t\tself.screen.refresh()" ]
[ "0.7463658", "0.73751247", "0.7124807", "0.70087713", "0.69775283", "0.69684094", "0.6961473", "0.6950376", "0.6876861", "0.67328113", "0.6729613", "0.6650536", "0.66410106", "0.65832186", "0.65808666", "0.6556705", "0.6552023", "0.6541808", "0.6536998", "0.6520757", "0.6516256", "0.6503798", "0.6486059", "0.64732426", "0.6469516", "0.64655715", "0.6461171", "0.6461171", "0.6461171", "0.6461171", "0.6461171", "0.6461171", "0.6461171", "0.6457899", "0.64475805", "0.64448875", "0.6443576", "0.64424574", "0.64367527", "0.6417033", "0.6385214", "0.6358974", "0.6355656", "0.63485855", "0.63269305", "0.63254386", "0.6323767", "0.63085306", "0.63085306", "0.63085306", "0.6303571", "0.6301685", "0.6298358", "0.62747735", "0.62685406", "0.62625253", "0.6260161", "0.6257273", "0.62514734", "0.62419343", "0.62360364", "0.62308085", "0.6230689", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.62288994", "0.6228817", "0.6227653", "0.6227281", "0.6216583", "0.6214308", "0.62054163", "0.62012386", "0.6200442", "0.6200442", "0.61987704", "0.6198394", "0.61874646", "0.6176639", "0.6171876", "0.61638606", "0.61620927" ]
0.0
-1
Basically the same method as in the DefaultStatusStrategy, but adding the lethality check.
def execute(self, agent: Agent, state: SimState) -> None: if agent.state() is not AgentState.INFECTIVE: return if np.random.random() < state.remove_prob(): if np.random.random() < state.lethality(): agent.set_state(AgentState.DEAD) else: agent.set_state(AgentState.IMMUNE) else: agent.update_sick_days()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_status(self):", "def custom_assess_status_check(self):\n options = self.options\n # can check options.thing to ensure that it makes sense\n # if wrong return 'blocked', \"The driver is badly configured ...\"\n return None, None", "def test_get_status(self) -> None:\n\n given = \"example.org\"\n\n # This is an abstract method. So we need to define it.\n self.checker.query_status = lambda: None\n\n self.checker.subject = given\n\n actual = self.checker.get_status()\n\n self.assertIsInstance(actual, CheckerStatusBase)", "def checkStatus(self):\n return None", "def status(self):\n return (not self.path == None) or self.append_ml_status", "def getStatus():", "def check(self):\n raise NotImplementedError()\n # return (level, **kw)", "def status(ABC) -> bool:", "def defaultStatus(self):\n raise NotImplementedError", "def change_status(self):\n if self.status == \"Still Loaned\":\n self.status = \"Given Back\"\n else:\n self.status = \"Still Loaned\"", "def health_check(self, *, scope: Scope) -> HealthCheckStatus:", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def postcheck(self):\n logger.debug(\"Postcheck status is %s\" % self.status)\n return self.status", "def status(self, status: dict):\n pass", "def _updateStatus(self, result):\n\n if result.status is not None:\n # status was explicitly set\n self.target.localStatus = result.status\n if self.target.present and self.target.created is None:\n self.target.created = self.configSpec.operation not in [\n \"check\",\n \"discover\",\n ]\n elif not result.success:\n # if any task failed and (maybe) modified, target.status will be set to error or unknown\n if result.modified:\n self.target.localStatus = (\n Status.error if self.required else Status.degraded\n )\n elif result.modified is None:\n self.target.localStatus = Status.unknown\n # otherwise doesn't modify target status", "def check_status(self):\n return self.status", "def check_status(self):\n return self.status", "def check_status(self, id):\n raise NotImplementedError()", "def status_before_must_be(*valid_start_statuses):\r\n def decorator_func(func):\r\n \"\"\"\r\n Decorator function that gets returned\r\n \"\"\"\r\n @functools.wraps(func)\r\n def with_status_check(obj, *args, **kwargs):\r\n if obj.status not in valid_start_statuses:\r\n exception_msg = (\r\n u\"Error calling {} {}: status is '{}', must be one of: {}\"\r\n ).format(func, obj, obj.status, valid_start_statuses)\r\n raise VerificationException(exception_msg)\r\n return func(obj, *args, **kwargs)\r\n\r\n return with_status_check\r\n\r\n return decorator_func", "def _status_exists(self, cls=MySQLStatus):", "def status(self):", "def decorator_func(func):\r\n @functools.wraps(func)\r\n def with_status_check(obj, *args, **kwargs):\r\n if obj.status not in valid_start_statuses:\r\n exception_msg = (\r\n u\"Error calling {} {}: status is '{}', must be one of: {}\"\r\n ).format(func, obj, obj.status, valid_start_statuses)\r\n raise VerificationException(exception_msg)\r\n return func(obj, *args, **kwargs)\r\n\r\n return with_status_check", "def _check_status(self, ests):\n if self.status != ests:\n raise CpoException(\"Unexpected solver status. 
Should be '{}' instead of '{}'\".format(ests, self.status))", "def status(self):\n raise NotImplementedError()", "def on_status(self, status):\n if status.retweeted_status:\n return", "def change_status(self):\n if self.status == 'in progress':\n self.status = 'done'\n return self.status\n elif self.status == 'done':\n self.status = 'in progress'\n self.eisenhower_priority()\n return self.status", "def need_attention(self):\n msg = [\"not staged\", \"behind\", \"ahead\", \"Untracked\"]\n status_msg = self.status()\n if any([each in status_msg for each in msg]):\n return True\n return False", "def _getLegalityStatus(self):\r\n try:\r\n self._checkPawn()\r\n self._checkPlayer()\r\n self._checkPath()\r\n self._checkCells()\r\n except (IllegalMoveException, CellException) as errorMessage:\r\n return (False, errorMessage)\r\n return (True, \"Legal move\")", "def get_status(self):\n\n # update status\n # TODO: this needs to consider \"partial\" status based on the testcodes that are defined\n # in the panel.\n # get the condition OK aliquot condition instance\n result_item_cls = models.get_model(self._meta.app_label, 'resultitem')\n aliquot_condition_ok = AliquotCondition.objects.get_ok()\n if not self.aliquot.aliquot_condition:\n # how can this be ??\n status = 'ERROR'\n elif result_item_cls.objects.filter(result__order=self) or self.panel.panel_type == 'STORAGE':\n # test aliquot condition and set the order status\n if self.aliquot.aliquot_condition == aliquot_condition_ok:\n status = 'COMPLETE'\n else:\n # has results or is stored but condition is not 10\n # was this meant to be a storage panel?\n status = 'ERROR'\n elif self.aliquot.aliquot_condition != aliquot_condition_ok:\n status = 'REDRAW'\n else:\n status = 'PENDING'\n # regardless of status, check that order was not deleted on DMIS\n dmis_tools = DmisTools()\n if dmis_tools.is_withdrawn_order(self):\n # other aspects of result visibility must consider this value\n status = 'WITHDRAWN'\n return status", "def status(self):\n pass", "def status(self):\n pass", "def _get_status(self):\n return self.__status", "def __level(self, *args, **kwargs):\n pass", "def __init__(self: \"Status\") -> None:\n raise NotImplementedError(\n \"Please instantiate one of the `Status` \"\n \"subclasses:\\n\"\n \"\\n\\t- `Failed`\"\n \"\\n\\t- `NotStarted`\"\n \"\\n\\t- `InProgress(progress)`\"\n \"\\n\\t- `Succeeded`\"\n )", "def monitor_behavior_status(self):\n self._flexbe_status_subscriber = rospy.Subscriber('/flexbe/status', BEStatus, self.callback_flexbe_status)", "def __lt__(self: \"Status\", other: \"Status\") -> bool:\n self_type = type(self)\n other_type = type(other)\n both_not_in_progress = not self.in_progress and not other.in_progress\n\n if both_not_in_progress and self_type is other_type:\n return False\n elif self_type is Failed:\n return True\n elif self_type is NotStarted and other_type in (InProgress, Succeeded):\n return True\n elif self_type is InProgress and other_type is InProgress:\n return self.progress < other.progress # type: ignore\n elif self_type is InProgress and other_type is Succeeded:\n return True\n else:\n return False", "def _apply_log_status(self, log_status):\n for k, v in log_status.items():\n if v is True:\n rdBase.EnableLog(k)\n else:\n rdBase.DisableLog(k)", "def make_decision(self):\n raise RuntimeError(\"the 'make_decision' method must be overriden\")", "def makeCondition (self, source) :\n condition = 'OK'\n if 'status' in response :\n if condition in ('OK', 'rejected', 'deferred') :\n condition = response 
['status'];\n else :\n raise moderationError ('error in status', source)\n return condition", "def status(*args, **kwargs): # real signature unknown\n pass", "def custom_assess_status_check(self):\n check_config_set = []\n if self.backup_target_type == \"nfs\":\n check_config_set = ['nfs-shares']\n elif self.backup_target_type == \"s3\":\n check_config_set = [\n \"tv-s3-secret-key\",\n \"tv-s3-access-key\",\n \"tv-s3-region-name\",\n \"tv-s3-bucket\",\n \"tv-s3-endpoint-url\"]\n unset_config = [c for c in check_config_set if not hookenv.config(c)]\n if unset_config:\n return \"blocked\", \"{} configuration not set\".format(\n ', '.join(unset_config))\n # For s3 support backup-target-type should be set to 'experimental-s3'\n # as s3 support is pre-production. The self.backup_target_type\n # property will do any transaltion needed.\n if self.backup_target_type not in [\"nfs\", \"s3\"]:\n return \"blocked\", \"Backup target type not supported\"\n return None, None", "def get_status(self):\n return super(Cabling, self).get_status()", "def status():\n pass", "def check_state(self):\n pass", "def test_set_user_status(self):\n pass", "def passes(self) -> bool:\n ...", "def __status(self, *args):\n return \"status\"", "def _status(self, host):\n pass", "def get_status() -> None:\n assert scraper.get_status() == True", "def statusUsable (statusID):\n if db.status.is_retired(statusID): \n return False\n if db.status.get(statusID, 'name') in ignoredStatuses:\n return False\n return True", "def status(self, **options):\n pass", "def inverted_patch_status(self):\n if self._status == MODIFIED_STATUS:\n # Modifications will remain modifications.\n inverted_status = self._status\n elif self._status == DELETED_STATUS:\n # Deletions will now be additions.\n inverted_status = ADDED_STATUS\n elif (self._status == ADDED_STATUS or\n self._status == COPIED_AND_MODIFIED_STATUS):\n # Additions will now be deletions.\n inverted_status = DELETED_STATUS\n else:\n # Everything else will be treated as a modification.\n inverted_status = MODIFIED_STATUS\n return inverted_status", "def hook_priority(self) -> int:", "def set_status(self, status: Status) -> None:\n if status.status_code == StatusCode.ERROR:\n self.elastic_span.outcome = constants.OUTCOME.FAILURE\n elif status.status_code == StatusCode.OK:\n self.elastic_span.outcome = constants.OUTCOME.SUCCESS\n else:\n self.elastic_span.outcome = constants.OUTCOME.UNKNOWN", "def status_check():\n return {\"status\": \"OK\"}", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def updateStatus(self, status):\n pass", "def _on_status_change(\n self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict\n ) -> None:\n if self.enabled and new == self.app.States.clean.value:\n self._start_notification_cycle()\n elif old == self.app.States.clean.value:\n self._cancel_notification_cycle()", "def add_status(name, nvr, commit):\n RULES_STATUS[name] = {\"version\": nvr, \"commit\": commit}", "def _wander_strategy(self):\n\n result = 'moveForward'\n\n if self.last_status == 'Fail' or self.last_action == 'Action.drop':\n result = 'turnLeft'\n\n return result", "async def set_status(event, gh, *args, **kwargs):\n issue_number_found = ISSUE_RE.search(event.data[\"pull_request\"][\"title\"])\n if not issue_number_found:\n issue_url = event.data[\"pull_request\"][\"issue_url\"]\n data = await gh.getitem(issue_url)\n for label in data[\"labels\"]:\n if label[\"name\"] == TRIVIAL_LABEL:\n status = 
TRIVIAL_STATUS\n break\n else:\n status = FAILURE_STATUS\n else:\n status = create_success_status(issue_number_found)\n await _post_status(event, gh, status)", "def get_status(self, state):\n raise NotImplementedError", "def old_statuses(self):\n return [\"passed_checks\", \"needs_checking\", \"known_bad\", \"not_connected\"]", "def check_status(self, sensor, state):\n self.sensed = sensor\n if state == 1:\n self.winner = True\n elif state == -1:\n self.alive = False", "def check(self):\n raise NotImplementedError", "def process(self):\n value, kw = self.check()\n \n level = logging.OK\n for i, j in self.thresholds:\n if value > i: level = j\n \n return level, kw", "def _check_le_1(self, target, **kwargs):\n # For every keyword argument\n for key, value in kwargs.items():\n # Set boolean conditions\n applicable_keyword = key in self._le_1_keywords\n applicable_target = target in self._le_1_targets\n # If key is in specified list\n if applicable_keyword and applicable_target:\n # Check if value is less than or equal to 1\n if 0.0 <= value <= 1.0:\n pass\n # If not, raise error\n else:\n raise FairException('\"{}\" must have \"{}\" value between zero and one.'.format(target, key))", "def update_landing_status(self, event):\n landed = (not self.state.armed)\n if self.irr_name == '' and self._min_range > -1.:\n self.landed = (self.rangefinder <=\n (self._min_range + 0.1)) or landed\n else:\n self.landed = landed or (self.rel_alt <= 0.1)", "def check(self):\n raise NotImplementedError('Must be implemented by subclass.')", "def defaultStatus(self, value=None):\n raise NotImplementedError", "def _do_status(self) -> Dict[str, Any]:\n return {}", "def _do_status(self) -> Dict[str, Any]:\n return {}", "def standard_status():\n errors, warnings, infos = THE_LOGGER.status()\n info(errors, \"errors\")\n info(warnings, \"warnings\")\n info(infos, \"infos\")", "def enable_status(self, status_type, required=False):\n try:\n status_type = status_type.lower()\n assert status_type in ('warning', 'critical')\n except (AttributeError, AssertionError):\n self.unknown_error(\n \"Status_type can only be one of *warning* or *critical*!\")\n\n self.add_option(status_type[0], status_type,\n \"Set the %s notification level.\" % status_type,\n required=required, action='callback', callback=convert_range)", "def check_unstaged_changes(self):\n pass", "def check(self) -> None:", "def _manage_status(response, target):\n name = target.link\n\n # We shall never crawl it again.\n if response.status == HTTPStatus.GONE:\n logging.warning(\"RSS is dead for %r.\", name)\n target.has_gone()\n return False\n\n # Becomes aware that it requires auth and must support it in the future.\n if response.status == HTTPStatus.UNAUTHORIZED:\n logging.warning(\"RSS requires auth for %r.\", name)\n target.needs_auth()\n return False\n\n # Nothing new received from it.\n if response.status == HTTPStatus.NOT_MODIFIED:\n logging.info(\"RSS has no data for %r.\", name)\n return False\n\n # URL has permanently moved, so we have to update target with the new one.\n if response.status == HTTPStatus.MOVED_PERMANENTLY:\n logging.info(\"RSS has moved for %r.\", name)\n target.moved_to(response.href)\n\n return True", "def test_get_node_status_batterystatus(self):\n pass", "def on_status_change(self, aws_greengrass, new_status, old_status):\r\n raise NotImplementedError('You must implement \"on_status_change()\" to '\r\n 'use the \"AWSGreengrassListener\" class.')", "def _Check(self, solution_type):\n zero_range = check_range.Singleton(0)\n for 
error_name, error_value in NOVATEL_SOLUTION_STATUS_HELPER:\n # Skip 'SolComputed' because that is what we want the status to be.\n if error_value == 0:\n continue\n # Raise a warning if the status is equal to the error_value.\n self._CheckForFailure(self._source + ' ' + error_name,\n numpy.array([int(s == error_value) for s in\n solution_type]),\n zero_range, False)", "def check(self):\n if (sum(self.state) == 0):\n return -1\n elif (self.state[-1] >= 1):\n return 1\n else:\n return 0", "def update(self, elapsed):\n delta = 18 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('undesired-stimulus-releaser')\n sorry = self.behavior_system.robot.emotion_system.emotion_sorrow\n\n if rel.is_active() and self.behavior_system.robot.emotion_system.active_emotion == sorry:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def checkBuildStatus(self):\n pass", "def update_alive_status(self):\n self.alive = self.health > 0", "def compute_sli_status(sli: SLI) -> \"StatusValue\":\n if sli.slo_warn_lower_bound < sli.sli_value < sli.slo_warn_upper_bound:\n status_value = Status.STATUS_HEALTHY\n elif sli.slo_error_lower_bound < sli.sli_value < sli.slo_error_upper_bound:\n status_value = Status.STATUS_WARN\n else:\n status_value = Status.STATUS_ERROR\n\n sli.status = status_value\n return status_value", "def change_learned_status(self, instance):\n self.song = self.songs.get_song_by_title(instance.text)\n # Marks song as learned and shows according status text\n if self.song.required:\n self.song.mark_learned()\n status_text = \"You have learned {}\".format(self.song.title)\n # Marks song as required and shows according status text\n else:\n self.song.mark_required()\n status_text = \"You need to learn {}\".format(self.song.title)\n # Shows status text, sorts songs by current s\n self.root.ids.status_text.text = status_text\n self.sort_songs(self.root.ids.sort_options.text)", "def __process_health(self) -> None:\n status = self.metrics.get(\"Status\", None)\n if status:\n health = status.get(\"Health\", None)\n measurement = \"Health\"\n if health == \"Warning\":\n value = 1\n datapoint = self.__gen_datapoint(measurement, self.label, value)\n self.datapoints.append(datapoint)\n elif health == \"Critical\":\n value = 2\n datapoint = self.__gen_datapoint(measurement, self.label, value)\n self.datapoints.append(datapoint)\n return", "def _getDefaultComponentStatus(self):\n host = socket.gethostname()\n defaultStatus = {'Down': set(), 'Run': set(), 'All': set()}\n resRunning = gConfig.getOptionsDict(os.path.join('/Registry/Hosts/', host, 'Running'))\n resStopped = gConfig.getOptionsDict(os.path.join('/Registry/Hosts/', host, 'Stopped'))\n if not resRunning['OK']:\n return resRunning\n if not resStopped['OK']:\n return resStopped\n defaultStatus['Run'] = set(resRunning['Value'].keys())\n defaultStatus['Down'] = set(resStopped['Value'].keys())\n defaultStatus['All'] = defaultStatus['Run'] | defaultStatus['Down']\n\n if defaultStatus['Run'].intersection(defaultStatus['Down']):\n self.logError(\"Overlap in configuration\", str(defaultStatus['Run'].intersection(defaultStatus['Down'])))\n return S_ERROR(\"Bad host configuration\")\n\n return S_OK(defaultStatus)", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def check_stability(self):", "def need_attention(status_msg):\n msg = [\"not staged\", \"behind\", \"ahead\", \"Untracked\"]\n if any([each in status_msg for each in 
msg]):\n return True\n return False", "def _matchStatus(self, status: int):\n return (status in self._allowedStatus['List']\n or (self._allowedStatus['Range']\n and (self._allowedStatus['Range'][0] <= status\n and status <= self._allowedStatus['Range'][1])))", "def _infer_status(playbook, playbook_stats):\n if not playbook.complete:\n return 'incomplete'\n\n if playbook_stats['failed'] >= 1 or playbook_stats['unreachable'] >= 1:\n return 'failed'\n else:\n return 'success'", "def getInfoOnStatus(self):\n raise NotImplementedError();", "def check(self) -> None:\n\n raise NotImplementedError", "def _set_status(self, action, status):\n raise NotImplementedError(\"Base class: cannot be called directly\")", "def __check(self):\n status = '200 OK'\n try:\n response = get(self.__url)\n status = '{} {}'.format(\n response.status_code,\n http.client.responses[response.status_code]\n )\n except Exception as e:\n status = e.__class__.__name__\n \n if status[:3] == '200':\n self.__notify_up()\n else:\n if not self.downtime_info:\n self.downtime_info = DowntimeInfo(status)\n self.__notify_down()" ]
[ "0.61206144", "0.58532864", "0.5794486", "0.57871175", "0.5704715", "0.5690589", "0.5610464", "0.55541265", "0.5540758", "0.5519943", "0.54881585", "0.5483977", "0.5483977", "0.54801756", "0.54513115", "0.5443968", "0.5439035", "0.5439035", "0.5421981", "0.54105186", "0.5404381", "0.5395389", "0.5378385", "0.537472", "0.53411466", "0.5306134", "0.5289326", "0.5283029", "0.52632314", "0.5258809", "0.52354175", "0.52354175", "0.5184999", "0.51533777", "0.5150826", "0.5149559", "0.5115875", "0.5113788", "0.5106317", "0.5102997", "0.51027936", "0.5088057", "0.50873613", "0.5070694", "0.5054294", "0.5046803", "0.5034478", "0.5020852", "0.5016135", "0.5008514", "0.5007735", "0.50027496", "0.49950868", "0.49900678", "0.49781284", "0.49744725", "0.4968093", "0.4968093", "0.4968093", "0.4968093", "0.49679244", "0.49625874", "0.4956041", "0.49540392", "0.4949608", "0.4947764", "0.49422675", "0.49418768", "0.49273005", "0.49245533", "0.4924036", "0.49202552", "0.49078557", "0.49047092", "0.48986417", "0.48986417", "0.48973063", "0.4894263", "0.48915", "0.48848012", "0.48838407", "0.48807397", "0.4871641", "0.4866211", "0.48651472", "0.4862801", "0.48614866", "0.4861372", "0.48516557", "0.48454723", "0.48408383", "0.4839824", "0.4839267", "0.48367307", "0.48297158", "0.48163706", "0.48140806", "0.4811499", "0.48107803", "0.4805709", "0.48055348" ]
0.0
-1
Updates the agents 'vaccine' before executing other checks
def execute(self, agent: Agent, state: SimState) -> None: if agent.state() == AgentState.SUSCEPTIBLE and self.days == state.vaccine_time() \ and np.random.random() < state.vaccine_share(): agent.set_state(AgentState.IMMUNE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReflexVacuumAgent():\n def program(percept):\n location, status = percept\n if status == 'Dirty':\n return 'Suck'\n elif location == loc_A:\n return 'Right'\n elif location == loc_B:\n return 'Left'\n return Agent(program)", "def update_agent_location_vector(self):\n\n for agent in self.agents:\n location = agent.getz()\n # print(location)\n if location[0] == 0:\n vectorized_agent_loc = location[1]\n elif location[0] == 1:\n vectorized_agent_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_agent_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_agent_loc = 12 + location[1]\n\n if agent.isBusy == False:\n # remove any location if it shows it as well\n self.agent_locations[0][vectorized_agent_loc] = 0\n continue\n else:\n self.agent_locations[0][vectorized_agent_loc] = 1\n if self.DEBUG:\n print('agent location vector is ', self.agent_locations)", "def ReflexVacuumAgent():\n\n def program((location, status)):\n if status == 'Dirty':\n return 'Suck'\n if location == loc_A:\n return 'Right'\n if location == loc_B:\n return 'Left'\n\n return Agent(program)", "def actualizar_velocidad(self):\r\n pass", "def trigger_violence(self):\n # First time offender get registered in the system and changes category into an Aggressor and a Victim\n if self.assaulted == 0:\n if self.stress > self.random.random():\n self.category = 'aggressor'\n self.assaulted += 1\n self.spouse.category = 'victim'\n self.spouse.got_attacked += 1\n\n # Second-time offender, checks to see if it is a recidivist.\n elif self.stress > self.random.random():\n self.assaulted += 1\n self.spouse.got_attacked += 1", "def test_ipam_vlans_update(self):\n pass", "def ModelBasedVacuumAgent():\n model = {loc_A: None, loc_B: None}\n\n def program(percept):\n \"\"\"Same as ReflexVacuumAgent, except if everything is clean, do NoOp.\"\"\"\n location, status = percept\n model[location] = status # Update the model here\n if model[loc_A] == model[loc_B] == 'Clean':\n return 'NoOp'\n elif status == 'Dirty':\n return 'Suck'\n elif location == loc_A:\n return 'Right'\n elif location == loc_B:\n return 'Left'\n return Agent(program)", "def update_all_agent(self):\n for a in self.agents:\n soft_update(a.target_actor, a.actor, self.tau)\n soft_update(a.target_critic, a.critic, self.tau)\n self.num_iteration += 1", "async def vac(self, ctx: Context):\n if ctx.invoked_subcommand is None:\n await self.check_vac_status_and_send_results(ctx.channel, True)", "def _default_step_action(self, agents):\n try:\n super()._default_step_action(agents)\n except NotImplementedError:\n pass\n # get collisions\n collisions = self.traci_handler.simulation.getCollidingVehiclesIDList()\n logger.debug('Collisions: %s', pformat(collisions))\n for veh in collisions:\n self.collisions[veh] += 1\n # get subscriptions\n self.veh_subscriptions = self.traci_handler.vehicle.getAllSubscriptionResults()\n for veh, vals in self.veh_subscriptions.items():\n logger.debug('Subs: %s, %s', pformat(veh), pformat(vals))\n running = set()\n for agent in agents:\n if agent in self.veh_subscriptions:\n running.add(agent)\n if len(running) == 0:\n logger.info('All the agent left the simulation..')\n self.end_simulation()\n return True", "def maintenance_enter(cls):\n d = directory.connect()\n host = socket.gethostname()\n for attempt in range(3):\n log.info(\"request-evacuation\")\n evacuated = d.evacuate_vms(host)\n if not evacuated:\n # need to call evacuate_vms again to arrive at the empty set\n break\n log.info(\"evacuation-started\", vms=evacuated)\n 
time.sleep(5)\n\n log.info(\"evacuation-pending\")\n # Trigger a gratuitous event handling cycle to help speed up the\n # migration.\n subprocess.call([\"systemctl\", \"reload\", \"consul\"])\n\n # Monitor whether there are still VMs running.\n timeout = TimeOut(300, interval=3)\n while timeout.tick():\n p = subprocess.Popen(\n [\"pgrep\", \"-f\", \"qemu-system-x86_64\"], stdout=subprocess.PIPE\n )\n p.wait()\n num_procs = len(p.stdout.read().splitlines())\n log.info(\n \"evacuation-running\",\n vms=num_procs,\n timeout_remaining=timeout.remaining,\n )\n if num_procs == 0:\n # We made it: no VMs remaining, so we can proceed with the\n # maintenance.\n log.info(\"evacuation-success\")\n sys.exit(0)\n time.sleep(10)\n\n log.info(\"evacuation-timeout\", action=\"postpone maintenance\")\n sys.exit(75)", "def ModelBasedVacuumAgent():\n model = {loc_A: None,\n loc_B: None}\n\n def program((location, status)):\n \"\"\"Same as ReflexVacuumAgent, except if everything is clean, do NoOp.\"\"\"\n model[location] = status\n if model[loc_A] == model[loc_B] == 'Clean':\n return 'NoOp'\n if status == 'Dirty':\n return 'Suck'\n if location == loc_A:\n return 'Right'\n if location == loc_B:\n return 'Left'\n\n return Agent(program)", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def _update_adversaries(self):\n for enemy in self.enemy_list:\n self._notify_adversary(enemy)", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def button_check_vat(self, cr, uid, ids, context=None):\n if context is None: context = {}\n context.update({'update_fiscal_information':True})\n super(res_partner, self).check_vat(cr, uid, ids, context=context)\n user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id\n if user_company.vat_check_vies:\n # force full VIES online check\n self.update_rif(cr, uid, ids, context=context)\n return True", "def run(self):\n # TODO: Clean this up to better facilitate running as client: There's \n # still too much being done in this function.\n #\n # Only start if we've been appropriately initialised\n # TODO: Are assertion checks stripped out in optimised builds? Is this\n # the wrong method for an important check?\n assert self.isInitialised, \"Detective must be initialise()d before \\\nrunning.\"\n #\n ## If not secretive, announce our cards ################################\n if not self.SECRETIVE:\n announcestr = \"Preparing for battle. 
I hold cards: \"\n for card in self.myCards:\n announcestr += game.CARDNAMES[card]+\", \"\n self.hook_notifydebug(announcestr[:-2],\"Velma.run\")\n #\n #\n # TODO: Move the following commented code stack to a test routine.\n # Miss Scarlet known to be culprit\n #ui.dbgstatus('tweak','Miss Scarlet known culprit')\n #for ixPlayer in range(1,self.nPlayers):\n # self.event_pass(character=4,room=8,weapon=19,player=ixPlayer)\n # Kitchen known to be scene\n #ui.dbgstatus('tweak','Kitchen known scene')\n #for ixPlayer in range(1,self.nPlayers):\n # self.event_pass(character=0,room=9,weapon=19,player=ixPlayer)\n # Unseen answer 1 Plum/Billiard/Wrench\n #ui.dbgstatus('tweak','Unseen answer from 1')\n #self.event_unseenresponse(character=1,room=12,weapon=20,shower=1,viewer=3)\n # 1 known to have Peacock\n #ui.dbgstatus('tweak','1 known has Peacock')\n #self.event_seenresponse(card=3,shower=1,viewer=0)\n # 1 known not to have candlestick\n #ui.dbgstatus('tweak','1 known without candlestick')\n #self.event_pass(character=0,room=8,weapon=16,player=1)\n # 2 known to have knife\n #ui.dbgstatus('tweak','2 known has knife')\n #self.event_seenresponse(card=15,shower=2,viewer=0)\n # 2 known to have either White or Lounge or Candlestick\n #ui.dbgstatus('tweak','Unseen answer from 2')\n #self.event_unseenresponse(character=5,room=7,weapon=16,shower=2,viewer=1)\n # 3 known has ballroom\n #ui.dbgstatus('tweak','3 known has ballroom')\n #self.event_seenresponse(card=10,shower=3,viewer=0)\n #\n #\n while not self.isGameOver:\n # Output everybody's identity and position on the board. This \n # information is not privileged, and should be helpful in ensuring\n # consistency between what Velma thinks is going on and the state\n # of the real-world board\n for ixPlayer in range(self.nPlayers):\n self.hook_notifydebug(\"Player \"+str(ixPlayer)+\" is \"+\n game.CARDNAMES[game.CHARS[self.playerCharIxs[ixPlayer]]]+\n \" at \"+\n str(self.charLocations[self.playerCharIxs[ixPlayer]]),\n \"Velma.run\")\n #\n # Remind our conversant of any pre-set scenario\n if self.DBGSCENARIOREMINDER:\n self.hook_notifydebug('Reminder: \\n' + self.DBGSCENARIOREMINDER,\n \"Velma.run\")\n #\n # If we're not competing with our conversant, plot our knowledge\n if not self.SECRETIVE:\n self.hook_displaysuspicions()\n #\n if self.ixHotSeat == 0:\n self.move()\n else:\n self.hook_observemove()\n #\n # The hot seat increments, and skips over any players previously\n # knocked out\n self.ixTurn += 1\n self.ixHotSeat = (self.ixHotSeat + 1) % self.nPlayers\n while self.playersOusted[self.ixHotSeat]:\n self.ixHotSeat = (self.ixHotSeat + 1) % self.nPlayers", "def test_update_agent(self):\n original_status = self.agent['admin_state_up']\n agent_status = {'admin_state_up': original_status}\n\n with self.override_role():\n self.agents_client.update_agent(agent_id=self.agent['id'],\n agent=agent_status)", "def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right',\n 'Left',\n 'Suck',\n 'NoOp']))", "def _update_aliens(self):\n self._check_fleet_edges()\n self.aliens.update()\n if pygame.sprite.spritecollideany(self.sideways_ship, self.aliens):\n self._sideways_ship_hit()\n self._check_aliens_leftmost_edge()", "def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))", "def step(self):\n if not self.is_done():\n actions = [ agent.program(self.percept(agent)) for agent in self.agents ]\n for agent, action in zip(self.agents, actions):\n self.execute_action(agent, action)\n\n self.exogenous_change()", "def 
level1AI(self, values):\n AI_server = AgentServer.get()\n values['e']['agent'] = AI_server.newAgent(2)\n #values['r']['agent'] = AI_server.newAgent(2)\n values['r']['agent'] = AI_server.newFakeAgent()\n values['j']['agent'] = AI_server.newFakeAgent()", "def vrules(self):\n ...", "def update_agent(self):\n if self.ready:\n choice_indices, states, rewards, sucstates, episode_active = self.get_minibatch()\n td_errors = self.agent.TD_update(states, rewards, sucstates, episode_active, gamma=self.gamma)\n self.mem_error[choice_indices.tolist()] = td_errors", "def detect_collisions_and_modify_states(self, **kwargs):\n checked_agents = set()\n for agent1 in self.agents.values():\n if not (isinstance(agent1, CollisionAgent) and isinstance(agent1, VelocityAgent)):\n continue\n checked_agents.add(agent1.id)\n for agent2 in self.agents.values():\n if not (isinstance(agent1, VelocityAgent) and isinstance(agent2, CollisionAgent)):\n continue\n if agent1.id == agent2.id: continue # Cannot collide with yourself\n if agent2.id in checked_agents: continue # Already checked this agent\n dist = np.linalg.norm(agent1.position - agent2.position)\n combined_sizes = agent1.size + agent2.size\n if dist < combined_sizes:\n self._undo_overlap(agent1, agent2, dist, combined_sizes)\n self._update_velocities(agent1, agent2)", "def test_ipam_vrfs_update(self):\n pass", "def test_update_virtualization_realm(self):\n pass", "def run(self):\n LOGGER.info(\n SVC_START_MSG.format(\n datetime.datetime.now()\n )\n )\n\n LOGGER.info('Running active agents.')\n\n while True:\n active_agent_list = Agent.objects.filter(is_active=True)\n LOGGER.debug(\n ACTVE_AGENTS_MSG.format(\n ', '.join(\n [agent.name for agent in active_agent_list]\n )\n )\n )\n\n try:\n mail_agent = Agent.objects.get(name='Mail')\n if mail_agent not in active_agent_list:\n LOGGER.info(\n INACTIVE_MAIL_MSG\n )\n except Agent.DoesNotExist:\n pass\n\n for agent in active_agent_list:\n LOGGER.debug(START_AGENT_MSG.format(agent.name))\n\n try:\n module, class_name = get_mod_and_class(agent.script)\n LOGGER.debug('Agent module: {}'.format(module))\n LOGGER.debug('Agent class: {}'.format(class_name))\n AgentClass = getattr(\n importlib.import_module(module),\n class_name\n )\n agent = AgentClass()\n agent.run()\n\n except ModuleNotFoundError as err:\n LOGGER.exception(\n \"Check for misspelling of the agent's script name.\\n\"\n f\"Agent > name: {agent.name} |\"\n f\" script: {agent.script}\\n\"\n )\n\n LOGGER.debug(\n ENGINE_SLEEP_MSG.format(AGENT_RUN_FREQUENCY)\n )\n time.sleep(AGENT_RUN_FREQUENCY)", "def update_villan_movement(self):\n for v in self.villans:\n for v2 in self.villans:\n if v != v2:\n if self.check_collision(v,v2):\n self.collision(v,v2)\n\n v.x += v.dx\n v.y += v.dy\n\n if v.y > self.frame_height or v.y < -v.r*2 or v.x > self.frame_width or v.x < -v.r*2:\n self.villans.remove(v)", "def step(self):\n try:\n self.agents.sort(key=lambda x: x.dist)\n except Exception as e:\n print(e)\n\n for agent in self.agents:\n try:\n agent.step()\n except Exception as e:\n print(e)\n\n\n # Removes agents if they reach exit\n for exit in self.model.exits:\n x, y = exit.pos[0] * 6 + 1, exit.pos[1] * 6 + 1\n if agent.node == (x, y):\n try:\n agent.saved()\n except Exception as e:\n print(e)", "def vistrailChanged(self):\n from vistrails.gui.vistrails_window import _app\n select_node = True\n if _app._previous_view and _app._previous_view.window() != self.window():\n select_node = False\n self.scene().setupScene(self.controller, select_node)\n if self.controller and 
self.controller.reset_version_view:\n self.scene().fitToAllViews()\n if self.controller:\n # self.versionProp.updateVersion(self.controller.current_version)\n self.versionProp.updateVersion(self.controller.current_version)\n self.emit(QtCore.SIGNAL(\"vistrailChanged()\"))", "def updateVisits(self):\n self.nVisits += 1", "def step(self):\n updating_env = {} if self.independent_update else self.env\n for a in self.agents:\n if self.i % a.period == 0:\n action = a(self.env)\n if a.name is not None:\n updating_env[a.name] = action\n if self.independent_update:\n self.env.update(updating_env)\n self.i += 1", "def update_monster(self):\n\n\t\t# if nothing else gets added to this (no other changes to update) you could delete\n\t\t# this function and simply call self.choose_guard() in its place\n\t\tself.guarded_area = self.choose_guard()", "def infect(self, viral_load):\n if self.health <= 29:\n self.health = self.health - (0.1 * viral_load)\n elif self.health > 29 and self.health < 50:\n self.health = self.health - (1.0 * viral_load)\n elif self.health > 50:\n self.health = self.health - (2.0 * viral_load)", "def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)", "def update(self):\n super(AgentSearchViewlet, self).update()\n\n context = aq_inner(self.context)\n self.membership = getToolByName(context, 'portal_membership')", "def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()", "def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()", "def test_vs_scoring_vina():\n vs = virtualscreening(n_cpu=1)\n vs.load_ligands('sdf', os.path.join(test_data_dir, 'data/dude/xiap/crystal_ligand.sdf'))\n vs.score(function='autodock_vina',\n protein=os.path.join(test_data_dir, 'data/dude/xiap/receptor_rdkit.pdb'))\n mols = list(vs.fetch())\n assert_equal(len(mols), 1)\n mol_data = mols[0].data\n assert_in('vina_affinity', mol_data)\n assert_in('vina_gauss1', mol_data)\n assert_in('vina_gauss2', mol_data)\n assert_in('vina_hydrogen', mol_data)\n assert_in('vina_hydrophobic', 
mol_data)\n assert_in('vina_repulsion', mol_data)", "def _activate(self):\n self.game.lives += 1", "def test_avgs_forced(self):\n u = leabra.Unit()\n\n for t in range(10):\n u.force_activity(1.0)\n u.calculate_net_in()\n u.cycle('minus')\n\n for name in ['avg_ss', 'avg_s', 'avg_m', 'avg_s_eff']:\n self.assertTrue(getattr(u, name) != 0.15)", "def update_agent_orientation_vector(self, DEBUG=False):\n count = 0\n for agent in self.agents:\n agent_dir = agent.getOrientation()\n agent_loc = agent.getz()\n for i, each_task in enumerate(self.tasks):\n angle_to_move_in = compute_angle_in_rad(agent_loc, each_task.getloc())\n angle_you_must_turn = angle_to_move_in - agent_dir\n angle_you_must_turn_bounded = np.arctan2(np.sin(angle_you_must_turn), np.cos(angle_you_must_turn))\n self.orientation[count][i] = angle_you_must_turn_bounded\n count += 1\n if DEBUG:\n print('orientation to all tasks is ', self.orientation)", "def vote(self, agents):\n\n suspects = []\n known_impostor = -1\n # Check which agents the current agent still suspects\n for a in agents:\n if self.km.knows_imp(self.agent_id, a.agent_id):\n known_impostor = a.agent_id\n self.logger.log(f\"Crewmate {self.agent_id} suspects {a.agent_id}\", Logger.LOG | Logger.PRINT_VISUAL)\n elif self.km.suspects(self.agent_id, a.agent_id):\n suspects.append(a.agent_id)\n self.logger.log(f\"Crewmate {self.agent_id} suspects {a.agent_id}\", Logger.LOG | Logger.PRINT_VISUAL)\n\n if known_impostor != -1:\n vote = known_impostor\n else:\n # Randomly vote for an agent on the suspect-list\n vote = random.sample(suspects, 1)[0]\n\n # If you are not yet sure, there is a probability that you vote pass.\n # This probability increases if you suspect more people (and are therefore less sure)\n threshold = (len(suspects) / (self.num_crew + self.num_imp)) * 0.5\n if random.random() < threshold:\n vote = -1\n\n self.logger.log(f\"Crewmate {self.agent_id} votes for {vote}\\n\", Logger.PRINT_VISUAL | Logger.LOG)\n return vote", "def update_alive_status(self):\n self.alive = self.health > 0", "def test_ipam_vlans_partial_update(self):\n pass", "def editor_multi_agent_example():\n agent_definitions = [\n AgentDefinition(\"uav0\", agents.UavAgent, [sensors.RGBCamera, sensors.LocationSensor]),\n AgentDefinition(\"uav1\", agents.UavAgent, [sensors.LocationSensor, sensors.VelocitySensor])\n ]\n env = HolodeckEnvironment(agent_definitions, start_world=False)\n\n cmd0 = np.array([0, 0, -2, 10])\n cmd1 = np.array([0, 0, 5, 10])\n\n for i in range(10):\n env.reset()\n env.act(\"uav0\", cmd0)\n env.act(\"uav1\", cmd1)\n for _ in range(1000):\n states = env.tick()", "def update_action(self):\n self.action = self.automata > self.states\n self.inv_action = self.inv_automata > self.states", "def approve_all(cls):\n for year, month in cls.all_dates:\n print(\"Checking\", year, month)\n vintage = Vintage(year, month)\n vintage.validate()", "def UpdateNode(self, result, actions):\n self.visits += 1\n self.wins += (result > 0)\n self.losses += (result < 0)\n self.draws += (result == 0)\n self.Q = self.Q + (result - self.Q)/self.visits\n \n # update rave values\n for a in actions:\n self.N_AMAF[a] += 1\n if not a in self.Q_AMAF:\n self.Q_AMAF[a] = 0.5\n self.Q_AMAF[a] = self.Q_AMAF[a] + (result - self.Q_AMAF[a])/self.N_AMAF[a]\n else:\n self.Q_AMAF[a] = self.Q_AMAF[a] + (result - self.Q_AMAF[a])/self.N_AMAF[a]", "def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = 
observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation", "def vitamins(self, vitamins: List[RecipeObjectNutrientsCalories]):\n\n self._vitamins = vitamins", "def observe(self):\r\n self.rect.center = self.agent.rect.center\r\n \r\n # Control Points.\r\n # All control points are visible.\r\n control_points = [\r\n {\r\n 'team': o.team.name,\r\n 'location': o.rect.center\r\n } \r\n for o in self.world.control_points\r\n ]\r\n\r\n # Walls.\r\n # Only walls within range within range are visible.\r\n # Simplification that seeing part of a wall (Rect)\r\n # means seeing the entire wall-part does seem reasonable.\r\n walls = [\r\n {\r\n 'top': o.rect.top,\r\n 'left': o.rect.left,\r\n 'bottom': o.rect.bottom,\r\n 'right': o.rect.right\r\n }\r\n for o in self.world.visible_objects(self, self.world.walls)\r\n ]\r\n \r\n # Ammo Packs.\r\n # Only ammo packs within range are visible.\r\n ammo_packs = [ {'location': o.rect.center}\r\n for o in self.world.visible_objects(self, self.world.ammo_packs)\r\n ]\r\n\r\n # Agents.\r\n # Only agents within range are visible, whether they are on your own team\r\n # or on the other team.\r\n agents = []\r\n for team in self.world.teams:\r\n agents += [\r\n {\r\n 'team': team.name,\r\n 'location': agent.rect.center,\r\n 'direction': agent.direction,\r\n 'id': agent.number\r\n }\r\n for agent\r\n in self.world.visible_objects(self, team)\r\n if agent != self.agent\r\n ]\r\n\r\n observation = {\r\n 'id': self.agent.number,\r\n 'location': self.agent.rect.center,\r\n 'ammo': self.agent.ammo,\r\n 'direction': self.agent.direction,\r\n 'team': self.agent.team.name,\r\n 'respawn': not self.agent.alive,\r\n 'agents': agents,\r\n 'controlpoints': control_points,\r\n 'walls': walls,\r\n 'ammopacks': ammo_packs,\r\n }\r\n \r\n return observation", "def update(self):\n if not self.in_flight():\n if self.rect.centerx > 250:\n self.xMove = 0\n self.yMove = 0\n print 'y'\n self.xMove = self.v_x\n self.yMove = self.v_y\n self.rect = self.rect.move(self.xMove,self.yMove)\n if (self.rect.centerx > 1000) or (self.rect.centerx < 0) or (self.rect.centery > 800) or (self.rect.centery < 0):\n self.reset()\n if self.in_flight():\n self.v_y += 0.2\n pygame.time.delay(5)", "def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: print('\\r=start training ...'+' '*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = 
self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)", "def process(self, car):\n car.check_coordination(self)", "def do_PUT(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT agent returning 400 response. 
uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"agent id not found in uri\")\n logger.warning('PUT agent returning 400 response. agent id not found in uri ' + self.path)\n return\n\n try:\n content_length = int(self.headers.get('Content-Length', 0))\n if content_length == 0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('PUT for ' + agent_id + ' returning 400 response. Expected non zero content length.')\n return\n\n post_body = self.rfile.read(content_length)\n json_body = json.loads(post_body)\n\n if \"activate\" in rest_params:\n auth_tag=json_body['auth_tag']\n\n agent = self.server.db.get_agent(agent_id)\n if agent is None:\n raise Exception(\"attempting to activate agent before requesting registrar for %s\"%agent_id)\n\n if agent['virtual']:\n raise Exception(\"attempting to activate virtual AIK using physical interface for %s\"%agent_id)\n\n if common.STUB_TPM:\n self.server.db.update_agent(agent_id, 'active',True)\n else:\n ex_mac = crypto.do_hmac(agent['key'],agent_id)\n if ex_mac == auth_tag:\n self.server.db.update_agent(agent_id, 'active',True)\n else:\n raise Exception(\"Auth tag %s does not match expected value %s\"%(auth_tag,ex_mac))\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT activated: ' + agent_id)\n elif \"vactivate\" in rest_params:\n deepquote = json_body.get('deepquote',None)\n\n agent = self.server.db.get_agent(agent_id)\n if agent is None:\n raise Exception(\"attempting to activate agent before requesting registrar for %s\"%agent_id)\n\n if not agent['virtual']:\n raise Exception(\"attempting to activate physical AIK using virtual interface for %s\"%agent_id)\n\n # get an physical AIK for this host\n registrar_client.init_client_tls(config, 'registrar')\n provider_keys = registrar_client.getKeys(config.get('general', 'provider_registrar_ip'), config.get('general', 'provider_registrar_tls_port'), agent_id)\n # we already have the vaik\n tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=agent['tpm_version'])\n if not tpm.check_deep_quote(hashlib.sha1(agent['key']).hexdigest(),\n agent_id+agent['aik']+agent['ek'],\n deepquote,\n agent['aik'],\n provider_keys['aik']):\n raise Exception(\"Deep quote invalid\")\n\n self.server.db.update_agent(agent_id, 'active',True)\n self.server.db.update_agent(agent_id, 'provider_keys',provider_keys)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT activated: ' + agent_id)\n else:\n pass\n except Exception as e:\n common.echo_json_response(self, 400, \"Error: %s\"%e)\n logger.warning(\"PUT for \" + agent_id + \" returning 400 response. 
Error: %s\"%e)\n logger.exception(e)\n return", "def _migrate(self):\n\t\tchoice_list = [s for s in self.site.neighbors if s != self.site]\n\t\tif len(choice_list) > 0: \n\t\t\tchoosed = numpy.random.choice(choice_list)\n\t\t\tif choosed.resource > self.site.resource:\n\t\t\t\tchoosed.add_agent(self)", "def step(self):\n\n \"\"\" First updates the variables values of the current time form the environment \"\"\"\n self.update_crispval(self.env.context)\n\n \"\"\"\n here the decision making of the agent\n to determine which activity to suggest to the patient\n i apply the creative controller to the current context\n \"\"\"\n curr_input = sample_inputs(False, 0, self.curr_interaction, self.variables_default_val, self.action_var,\n self.fuzzysets_values, self.variables_universe)\n c_out, rules_activations, is_cc_exception = self.creative_controller.computeOutput(curr_input, False)\n\n \"\"\" i obtain a number of ouput crisp values.\n i determine which one achieves the max expected output w.r.t. the a-rules \"\"\"\n best_a = None\n best_a_val = -1000\n best_a_exphapp = 5\n if self.verbose > Constants.VERBOSE_BASIC:\n print(\"rules activations\")\n for a in rules_activations:\n if rules_activations[a] > 0:\n print(str(a) + \"\\n\\t\\t\\t-> \" + str(rules_activations[a]))\n for item in c_out.items(): # for each pair <activity, crisp output>\n if self.verbose > Constants.VERBOSE_BASIC:\n print(item)\n if not item[\n 0] in self.curr_iter_suggestions: # if i didn't suggest the same activity already in the same interaction\n inputs = dict(curr_input) # I create a copy fo the dict\n inputs[item[0]] = item[1]\n assessor_id = self.actions_to_ti[item[0]]\n self.assessors[assessor_id].feed_inputs(inputs)\n is_ac_exception = False\n assout = []\n try:\n a_out, a_rules_activations, is_ac_exception = self.assessors[assessor_id].compute(verbose=False)\n assout = [a_out[ao] for ao in a_out]\n except:\n is_ac_exception = True\n traceback.print_exc()\n # todo the following assumes that every assessor controller has same eval var\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n if len(assout) == 0:\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n w_ta = self.weights_therapeutic_interventions[self.actions_to_ti[item[0]]]\n\n avg_credit_rules_that_suggested_action = 1.0\n nr_rules_that_suggested_action = 0\n for r in rules_activations:\n if (rules_activations[r] > 0) and (str(item[0]) in str(r)):\n avg_credit_rules_that_suggested_action = avg_credit_rules_that_suggested_action + \\\n self.rules_credits[str(r)]\n nr_rules_that_suggested_action = nr_rules_that_suggested_action + 1\n if nr_rules_that_suggested_action > 0:\n avg_credit_rules_that_suggested_action = (\n avg_credit_rules_that_suggested_action - 1.0) / nr_rules_that_suggested_action\n repetition_cost = 1.0\n a_val = (mean(assout) * w_ta * avg_credit_rules_that_suggested_action) / repetition_cost\n if (a_val > best_a_val) and (\n item[1] >= (self.variables_default_val[item[0]] + self.range_step[item[0]])):\n best_a = item\n best_a_val = a_val\n best_a_exphapp = mean(assout)\n\n \"\"\"I suggest the activity with best expected outcome and store the information to populate the interactions \n memory \"\"\"\n self.proposeActivity(best_a)\n if not best_a is None:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"proposing activity\" + str(best_a) + \" which has expected feedback: \" + str(\n best_a_exphapp) + \", which weighted is \" + str(best_a_val))\n 
self.curr_iter_suggestions.append(best_a[0])\n self.last_suggestion = best_a\n else:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"the activity proposed is \" + str(\n best_a) + \" so I don't suggest anything. I will ask a question instead\")\n self.last_suggestion = []\n self.expected_feedback = best_a_exphapp\n self.last_context = self.env.context.copy()\n self.last_rules_activations = rules_activations", "def update_vie(self):\n self.essais_restant[\"text\"] = \"Vous disposez de {} vies\".format(self.jeu.get_nb_vie())", "def main(var):\n var = getEnvironmentVars(var)\n\n # Connect to the local libvirtd socket read only\n conn = libvirt.openReadOnly(\"qemu:///system\")\n if conn == None:\n logger.error(\"Failed to open connection to the hypervisor\")\n sys.exit(1)\n domainIDs = conn.listDomainsID()\n\n\n result=runCmd(\"checkjob --xml \" + var['pbs-jobid'])\n moabVars = getMoabVars(result)\n\n # We need the libvirtd instance_name for monitoring \n if 'VM_NAME' in moabVars:\n instance_name = moabVars['libvirtName']\n else:\n logger.error(\"VM_NAME not found in Moab Job Metadata\")\n exit(1)\n\n logger.info(\"VM instance_name=%s\", instance_name)\n\n logger.info(\"VM is ACTIVE -> continue Monitoring via libvirtd\")\n try:\n domain = conn.lookupByName(instance_name)\n except:\n logger.error(\"ERROR: unable to find VM in libvirt! Name=%s\", instance_name)\n # In this case the VM musst be terminated in OpenStack.\n # Possible reason: the VM was started on a wrong Compute-Node\n #deleteVM(nova, vm)\n exit(1)\n\n while True:\n try:\n state, reason = domain.state()\n except:\n logger.info(\"Domain vanished: asuming it was terminated in OpenStack\")\n break\n\n if state == libvirt.VIR_DOMAIN_RUNNING:\n logger.info(\"The state is VIR_DOMAIN_RUNNING\")\n elif state == libvirt.VIR_DOMAIN_SHUTDOWN:\n logger.info(\"The state is VIR_DOMAIN_SHUTDOWN\")\n break\n elif state == libvirt.VIR_DOMAIN_SHUTOFF:\n logger.info(\"The state is VIR_DOMAIN_SHUTOFF\")\n break\n elif state == libvirt.VIR_DOMAIN_CRASHED:\n logger.info(\"The state is VIR_DOMAIN_CRASHED\")\n break\n else:\n logger.info(\"The libvirtd state is unknown.\")\n break\n logger.debug(\"The reason code is %s\", str(reason))\n time.sleep(30)\n if shutdown_flag.isSet():\n logger.debug(\"Shutdown-Event: Exit main loop\")\n break\n\n logger.info(\"Monitoring is finished. 
Exit\")\n\n return", "def update_agent_distances_vector(self):\n count = 0\n for agent in self.agents:\n agent_loc = agent.getz()\n\n for i, each_task in enumerate(self.tasks):\n dist = euclid_dist(agent_loc, each_task.getloc())\n self.agent_distances[count][i] = dist\n count += 1\n if self.DEBUG:\n print(self.agent_distances)", "def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()", "def step(self):\n prey_neighbors = [x for x in self.model.space.get_neighbors(self.pos, self.vision+ 20, False) if isinstance(x,boid.Boid)]\n nearby_obstacles = [x for x in self.model.space.get_neighbors(self.pos, self.vision + 15, False) if isinstance(x, Obstacle)]\n self.velocity += (self.avoid_collision(nearby_obstacles) * self.collision_separation +\n self.attack(prey_neighbors)) / 2\n self.velocity /= np.linalg.norm(self.velocity)\n new_pos = self.pos + self.velocity * self.speed\n self.model.space.move_agent(self, new_pos)\n self.eat(prey_neighbors)\n\n\n # update for drawing\n self.update()", "def step(self):\n self.age += 1\n self.move_agent()\n self.sugar -= self.metabolism\n\n # Eat sugar\n available_sugar = self.get_sugar(self.pos).amount\n self.sugar += available_sugar\n# self.total_sugar_in_field -= available_sugar\n # Set sugar in current cell to zero\n self.get_sugar(self.pos).eat_sugar() \n \n \n \n if self.sugar == 0:\n self.model.remove_agent(self)\n \n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n \n \n if self.reproduction_and_death:\n if self.age > self.max_age: # Agent dies\n # Tax inheritance\n self.model.inheritance_tax_agent(self)\n \n if self.model.spawn_at_random:\n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n self.model.remove_agent(self) #agent dies\n \n \n else:\n #spawn new agent\n self.gen += 1\n if self.sugar != 0:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.sugar)\n else:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.model.starting_sugar)\n \n self.model.remove_agent(self) #agent dies", "def test_update_impact_level(self):\n pass", "def inhabitant_check(self):\n\t\tchanged = False\n\t\tif self.happiness > self.__get_data(\"happiness_inhabitants_increase_requirement\") and \\\n\t\t\t self.inhabitants < self.inhabitants_max:\n\t\t\tself.inhabitants += 1\n\t\t\tchanged = True\n\t\t\tself.log.debug(\"%s: inhabitants increase to %s\", self, self.inhabitants)\n\t\telif self.happiness < self.__get_data(\"happiness_inhabitants_decrease_limit\") and \\\n\t\t self.inhabitants > 1:\n\t\t\tself.inhabitants -= 1\n\t\t\tchanged = True\n\t\t\tself.log.debug(\"%s: inhabitants decrease to %s\", self, self.inhabitants)\n\n\t\tif changed:\n\t\t\t# see http://wiki.unknown-horizons.org/index.php/DD/Economy/Supplying_citizens_with_resources\n\t\t\tself.alter_production_time( 1 + (float(self.inhabitants)/10))\n\t\t\tself._changed()", "def level2AI(self, 
values):\n AI_server = AgentServer.get()\n values['e']['agent'] = AI_server.newAgent(2)\n values['r']['agent'] = AI_server.newAgent(2)\n values['j']['agent'] = AI_server.newFakeAgent()", "def normal_update(self, game, elfDict):\n self.game = game # update game\n self.my_elves = [elf for elf in elfDict.values() if not elf.elf.already_acted] # update self.my_elves\n self.game = game # update self.game\n self.portals.portals_update(game) # update portals (the object)\n self.my_castle = game.get_my_castle()", "def _update(self):\n self._execute_lane_changes()\n self._execute_forward_movement()", "def update_festival_details(self):\n self.compute_festivals()\n self.assign_relative_festivals()", "def collect_OARV(self, observation: np.ndarray, action, reward: float, V_estimate: float) -> None:\n self.V_estimates.append(V_estimate)\n super().collect_OAR(observation, action, reward)", "def load_agents(self, agents):\n self.agents = agents", "def update(self, elapsed):\n delta = 35 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('threatening-stimulus-releaser')\n fear = self.behavior_system.robot.emotion_system.emotion_fear\n\n # TODO: incorporate fear emotion\n if rel.is_active() and self.behavior_system.robot.emotion_system.active_emotion == fear:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def update_latent(self):\n self.scenario.update_latent()", "def agent_updated(self, context, payload):\n if payload['admin_state_up'] != self.admin_state_up:\n self.admin_state_up = payload['admin_state_up']\n if self.admin_state_up:\n self.needs_resync = True\n else:\n for pool_id in self.cache.get_pool_ids():\n self.destroy_device(pool_id)\n LOG.info(_(\"agent_updated by server side %s!\"), payload)", "def multi_agent_example():\n env = holodeck.make(\"CyberPunkCity-FollowSight\")\n\n cmd0 = np.array([0, 0, -2, 10])\n cmd1 = np.array([0, 0, 0])\n for i in range(10):\n env.reset()\n env.tick()\n env.act(\"uav0\", cmd0)\n env.act(\"nav0\", cmd1)\n for _ in range(1000):\n states = env.tick()\n pixels = states[\"uav0\"][\"RGBCamera\"]", "def non_social_action(self):\n\n if not self.agent.done:\n if self.opponenet.cashed and self.opponenet.pumps >= self.agent.pumps:\n self.EV = self.opponenet.pumps + np.random.randint(1,5)\n\n self.action_gating()", "def update_agent_is_idle_based_on_class(self):\n # method to update class based on external params\n for counter, agent in enumerate(self.agents):\n isAgentIdle = not agent.isBusy\n self.is_agent_idle[counter][0] = isAgentIdle", "def list_agents(self):\n\n agents = self.vip.rpc.call(CONTROL, \"list_agents\").get(timeout=5)\n versions = self.vip.rpc.call(CONTROL, \"agent_versions\").get(timeout=5)\n status_running = self.status_agents()\n uuid_to_status = {}\n # proc_info has a list of [startproc, endprox]\n for a in agents:\n pinfo = None\n is_running = False\n for uuid, name, proc_info in status_running:\n if a['uuid'] == uuid:\n is_running = proc_info[0] > 0 and proc_info[1] == None\n pinfo = proc_info\n break\n\n uuid_to_status[a['uuid']] = {\n 'is_running': is_running,\n 'version': versions[a['uuid']][1],\n 'process_id': None,\n 'error_code': None,\n 'permissions': {\n 'can_stop': is_running,\n 'can_start': not is_running,\n 'can_restart': True,\n 'can_remove': True\n }\n }\n\n if pinfo:\n uuid_to_status[a['uuid']]['process_id'] = proc_info[0]\n uuid_to_status[a['uuid']]['error_code'] = proc_info[1]\n\n if 'volttroncentral' in a['name'] or \\\n 
'vcplatform' in a['name']:\n uuid_to_status[a['uuid']]['permissions']['can_stop'] = False\n uuid_to_status[a['uuid']]['permissions']['can_remove'] = False\n\n # The default agent is stopped health looks like this.\n uuid_to_status[a['uuid']]['health'] = {\n 'status': 'UNKNOWN',\n 'context': None,\n 'last_updated': None\n }\n\n if is_running:\n identity = self.vip.rpc.call(CONTROL, 'agent_vip_identity',\n a['uuid']).get(timeout=30)\n try:\n status = self.vip.rpc.call(identity,\n 'health.get_status').get(\n timeout=5)\n uuid_to_status[a['uuid']]['health'] = status\n except gevent.Timeout:\n _log.error(\"Couldn't get health from {} uuid: {}\".format(\n identity, a['uuid']\n ))\n except Unreachable:\n _log.error(\n \"Couldn't reach agent identity {} uuid: {}\".format(\n identity, a['uuid']\n ))\n for a in agents:\n if a['uuid'] in uuid_to_status.keys():\n _log.debug('UPDATING STATUS OF: {}'.format(a['uuid']))\n a.update(uuid_to_status[a['uuid']])\n return agents", "def ExecuteBeforeSolutionLoop(self):\n super().ExecuteBeforeSolutionLoop()\n num_of_vaviables = len(self.variables) + len(self.nonhistorical_variables)\n self.values = [[-1e6] * num_of_vaviables for _ in self.found_positions]", "def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_going_down(self):", "def update(self):\n self.wall_list.update()\n self.enemy_list.update()\n self.sludge.update()\n self.consumeable.update()\n self.can_climb.update()", "def _update_aliens(self):\n\t\tself._check_fleet_edges()\n\t\tself.aliens.update()\n\n\t\t#check if ship collides with aliens\n\n\t\tpygame.sprite.spritecollideany(self.ship, self.aliens)\n\n\t\tself._ship_hit()", "def audit():\n governance = web3.ens.resolve('ychad.eth')\n registry = load_registry()\n vaults = load_vaults(registry)\n for v in vaults:\n if v.vault.governance() != governance:\n secho(f'{v.name} vault governance == {v.vault.governance()}', fg='red')\n print(f'{v.vault}.setGovernance({governance})')\n if v.strategy.governance() != governance:\n secho(f'{v.name} strategy governance == {v.strategy.governance()}', fg='red')\n print(f'{v.strategy}.setGovernance({governance})')", "def update(self, elapsed):\n delta = 18 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('undesired-stimulus-releaser')\n sorry = self.behavior_system.robot.emotion_system.emotion_sorrow\n\n if rel.is_active() and self.behavior_system.robot.emotion_system.active_emotion == sorry:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def update(self):\n super().update()\n self.decelerate()\n #check for a collisison with all rock types\n self.checkForRockCollisions()\n #when the ship gets hit by a rock, it enters a period of invulnerability. 
we need to make sure that period ends at the proper time\n self.checkGracePeriodDuration()\n #movement stuff\n if (self.isAcceleratingForward):\n self.accelerateForwards()\n if (self.isRotatingLeft):\n self.rotateLeft()\n if (self.isRotatingRight):\n self.rotateRight()", "def check_cves(self, component_analyses):\n cves = component_analyses[\"cve\"]\n for c in cves:\n assert \"id\" in c\n assert \"cvss\" in c\n self.check_cve_value(c[\"id\"])", "def update(self):\n\n if self.ai_func is not None:\n self.ai_func(self.game, self)\n\n self.turns = self.turns + 1\n if self.stats.hp < self.stats.max_hp:\n self.turns_since_regen = self.turns_since_regen + 1\n\n if self.turns_since_regen >= self.regen_rate:\n self.stats.hp = self.stats.hp + 1\n self.turns_since_regen = 0", "def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()", "def dvs_multiple_uplinks_active(self):\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1)\n self.show_step(2)\n plugin.install_dvs_plugin(self.ssh_manager.admin_ip)\n\n self.show_step(3)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": NEUTRON_SEGMENT_TYPE\n }\n )\n self.show_step(4)\n self.show_step(5)\n self.show_step(6)\n self.show_step(7)\n self.fuel_web.update_nodes(cluster_id,\n {'slave-01': ['controller'],\n 'slave-02': ['compute-vmware'],\n 'slave-03': ['compute'],\n 'slave-04': ['compute']})\n\n self.show_step(8)\n self.show_step(9)\n self.fuel_web.vcenter_configure(\n cluster_id,\n target_node_2=self.node_name('slave-02'),\n multiclusters=True)\n\n self.show_step(10)\n plugin.enable_plugin(cluster_id, self.fuel_web, au=3, su=0)\n\n self.show_step(11)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])", "def _update_aliens(self):\n self._check_fleet_edges()\n\n self.aliens.update()\n if pygame.sprite.spritecollideany(self.ship, self.aliens) != None :\n print(\"SHIP Hit !\")\n self._ship_hit()\n\n # Verify if aliens reach the bottom of the screen\n self._check_aliens_bottom()", "def update(self):\n startstate = self.state\n goalstates =self.env.getGoalStates()\n inputs = self.env.sense(self)\n self.action_sequence = self.drive(goalstates,inputs)\n action = self.choose_action() # Choose an action\n self.state = self.env.act(self,action) \n return", "def put(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n agent = self.db.get_agent(agent_id)\n\n if agent is not None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('PUT returning 404 response. 
agent id: ' + agent_id + ' not found.')\n\n if \"reactivate\" in rest_params:\n agent['operational_state']=cloud_verifier_common.CloudAgent_Operational_State.START\n asyncio.ensure_future(self.process_agent(agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n elif \"stop\" in rest_params:\n # do stuff for terminate\n logger.debug(\"Stopping polling on %s\"%agent_id)\n self.db.update_agent(agent_id,'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"PUT returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n self.finish()", "def _update(self):\n self.cv.update()", "def initialization_step(self):\n # Update where agents are\n self.update_agent_location_vector()\n # update task locations\n self.update_task_location_vector()\n # update deadlines\n self.populate_deadline_vector()\n # update distances to each task and orientation to each task\n self.update_agent_distances_vector()\n self.update_agent_orientation_vector()", "def Start(self):\n\n # Keep updating until all agents have completed their waypoints\n all_finished = False\n while not all_finished:\n all_finished = True\n self.player_client.read()\n for agent in self.agents:\n all_finished = agent.update() and all_finished", "def hesitant_action(self):\n if not self.agent.done:\n if not self.opponenet.done:\n self.EV = self.opponenet.pumps - np.random.randint(1,5)\n else:\n if self.opponenet.cashed:\n self.EV = self.opponenet.pumps + 1\n elif self.opponenet.popped:\n if not self.stopCount:\n if self.agent.pumps == 0:\n self.EV = np.random.randint(1,10)\n else:\n self.EV = self.agent.pumps\n self.stopCount = True\n self.action_gating()", "def set_preferred_velocities(self):\n for i in range(self.simulator_.num_agents):\n goal_vector = self.goals_[i] - self.simulator_.agents_[i].position_\n\n if rvo_math.abs_sq(goal_vector) > 1.0:\n goal_vector = rvo_math.normalize(goal_vector)\n\n self.simulator_.set_agent_pref_velocity(i, goal_vector)" ]
[ "0.6244821", "0.61558896", "0.6037154", "0.57862145", "0.56732225", "0.56598973", "0.5629421", "0.56235677", "0.5466714", "0.54048747", "0.53794134", "0.5335644", "0.53047174", "0.52038425", "0.5185621", "0.5175854", "0.51662546", "0.5162844", "0.5150552", "0.51480347", "0.5138042", "0.5134468", "0.512663", "0.5100924", "0.5098726", "0.50958264", "0.50840867", "0.50814515", "0.50723225", "0.50601196", "0.5056442", "0.50536203", "0.5017762", "0.50087327", "0.49955076", "0.49879363", "0.4986763", "0.49785933", "0.49761954", "0.49761954", "0.49684632", "0.49550077", "0.49378455", "0.49374303", "0.4937097", "0.49333045", "0.4933049", "0.4929525", "0.49125835", "0.4909979", "0.49068978", "0.4893209", "0.48930058", "0.4891049", "0.48908123", "0.48872957", "0.48860773", "0.48809144", "0.48785022", "0.48739082", "0.48663732", "0.48637068", "0.48613977", "0.48557884", "0.48510587", "0.48463663", "0.4840175", "0.483738", "0.48352915", "0.48320597", "0.48305273", "0.48210326", "0.4817529", "0.4812822", "0.48114148", "0.48094088", "0.48083043", "0.48080352", "0.48018175", "0.4798776", "0.4795651", "0.47942325", "0.47905022", "0.47901246", "0.4786823", "0.47861442", "0.4780622", "0.47789547", "0.47779524", "0.47778004", "0.477342", "0.47718722", "0.47696996", "0.4769307", "0.47688094", "0.4762238", "0.4756694", "0.475582", "0.47555396", "0.4755239" ]
0.562963
6
Event handler to count up the number of days
def __next_step(self, state) -> None:
    self.days += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number_days(self):\r\n return 1", "def get_number_days(self):\r\n raise NotImplementedError", "def hindu_day_count(cls, date):\n return date - cls.EPOCH", "def day(self):\n return 0", "def day(self):\n return 0", "def Daysleftverification():\n pass", "def elapsed_days(self) -> int:\n return (datetime.today() - self.release_datetime).days", "def compute_real_days(self):\n if (self.end_date > date.today()):\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, date.today())\n else:\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, self.end_date)", "def days(input=None):\n return get(input).days", "def numOfDays():\n\n print(\"Podaj rok, miesiac oraz dzien pierwszej daty: \")\n inputs = [input() for i in range(3)]\n\n print(\"Podaj rok, miesiac oraz dzien drugiej daty: \")\n inputs1 = [input() for i in range(3)]\n\n d0 = date(inputs[0], inputs[1], inputs[2])\n d1 = date(inputs1[0], inputs1[1], inputs1[2])\n delta = abs(d1 - d0)\n \n print(delta.days)\n return abs(delta.days)", "def size(self):\n\t\treturn (self.dates[1] - self.dates[0]).days", "def day(self):\n return self._days", "def get_interactive_days(self):\n answer = input(\"Press return to get entries of past day or input number of days to go back in time: \")\n if answer == '':\n days = 1\n else:\n try:\n days = int(answer)\n except:\n print(\"You didn't enter a number, assuming 1 day.\")\n days = 1\n return days", "def days(self):\n return int(self.hours / 24)", "def days(self):\n return self._days", "def get_no_of_days(self, slug_ls):\n date_ls = []\n #for each country get first case confirmed date\n for i in slug_ls:\n url = self.base_url+\"dayone/country/\"+i+\"/status/confirmed\"\n response = requests.get(url)\n date_ls.append(response.json()[0]['Date'])\n \n t1 = date.today()\n days = []\n #Calculate 'days since first case' for each country\n for i in range(len(date_ls)):\n t2 = datetime.datetime.strptime(date_ls[i],\"%Y-%m-%dT%H:%M:%SZ\")\n days.append(str(t1-t2.date())[0:4])\n return days", "def _get_number_of_days(self, date_from, date_to, employee_id):\n\t\tfrom_dt = fields.Datetime.from_string (date_from)\n\t\tto_dt = fields.Datetime.from_string (date_to)\n\t\tif employee_id:\n\t\t\temployee = self.env['hr.employee'].browse (employee_id)\n\n\t\t\t# Testing 16/11/19\n\t\t\tshift = employee.resource_calendar_ids\n\t\t\treturn employee.get_work_days_count (from_dt, to_dt, shift)\n\n\t\ttime_delta = to_dt - from_dt\n\t\treturn math.ceil (time_delta.days + float (time_delta.seconds) / 86400)", "def calculate_days(self):\n tweet_time = self.data['created_at']\n birthday = self.data['user']['created_at']\n my_dates = {\"Jan\": 1, \"Feb\": 2, \"Mar\": 3, \"Apr\": 4, \"May\": 5, \"Jun\": 6, \"Jul\": 7, \"Aug\": 8, \"Sep\": 9, \"Oct\": 10,\n \"Nov\": 11, \"Dec\": 12}\n # This could have easily been cast into one of the numerous datetime function's immediately, however\n # it was causing a major slowdown to the program and so the below was a quick fix.\n ######################################################################\n # NOTICE: IF SOMETHING BREAKS THIS IS MOST LIKELY TO BE WHAT IT IS #\n ######################################################################\n tweet_time2 = [my_dates[tweet_time[4:7]], int(tweet_time[8:10]), int(tweet_time[26:])]\n birthday2 = [my_dates[birthday[4:7]], int(birthday[8:10]), int(birthday[26:])]\n first = date(tweet_time2[2], tweet_time2[0], tweet_time2[1])\n second = date(birthday2[2], birthday2[0], birthday2[1])\n final = first - second\n days = 
final.days\n follows = self.data['user']['followers_count']\n favorites = self.data['user']['favourites_count']\n statuses = self.data['user']['statuses_count']\n favpd = favorites/days\n folpd = follows/days\n statpd = statuses/days\n return {\"days\": final.days, \"folpd\": folpd, \"favpd\": favpd, \"statpd\": statpd}", "def calculate_days(time):\n return int(time / 86400)", "def days(self) -> Optional[int]:\n return pulumi.get(self, \"days\")", "def days(self):\n ends_at = created_at = datetime.datetime.now().replace(tzinfo=utc)\n if self.created_at:\n created_at = self.created_at\n if self.ends_at:\n ends_at = self.ends_at\n return (ends_at - created_at).days", "def compute_days(start: date, end: date) -> int:\n delta = end - start\n return delta.days + 1", "def compute(self, days=1):\n raise NotImplementedError", "def days(self, days):\n\n self._days = days", "def days(self, days):\n\n self._days = days", "def GetDayNumber(self, StartDay, Today):\n DayFrom = datetime.strptime(StartDay, '%Y-%m-%d %H:%M:%S')\n DayNumber = int((Today - DayFrom).days) + 1\n return DayNumber", "def increase_time(self,s):\n self.days += 1\n if self.disease_status > 0:\n self.time_since_infection += 1\n if self.days == 365:\n self.increase_age(s)", "def _get_number_of_days(self, date_from, date_to):\n\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_days = timedelta.days + float(timedelta.seconds) / 86400\n return diff_days", "def days_registered(self):\n days_registered = (datetime.utcnow() - self.date_joined).days\n if not days_registered:\n return 1\n return days_registered", "def make_count_change():\n \"*** YOUR CODE HERE ***\"", "def no_new_cases_count(day: int, month: int, year: int = 2020) -> int:\r\n \r\n # Your code goes here (remove pass)\r", "def day_06_a() -> int:\n return 0", "def count(self, counter, delta):\n pass # Do nothing", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)", "def add_n_days(self, n):\n print(self)\n while n > 0:\n self.tomorrow()\n print(self)\n n -= 1", "def day(self) -> int:\n return pulumi.get(self, \"day\")", "def age(self):\n then = self.ship_date\n if self.status == 'delivered':\n now = self.event_time.date()\n else:\n now = datetime.datetime.now().date()\n delta = now - then\n return delta.days", "def remaining_days_in_cycle(self) -> int:\n if not self.expiration:\n return 0\n delta = self.expiration - _today()\n return int(delta.days)", "def days_between(self, other):\n new_self = self.copy()\n new_other = other.copy()\n count=0\n if self.is_before(other):\n while(True):\n if new_self == new_other:\n break\n count-=1\n new_self.advance_one()\n elif self.is_after(other):\n while(True):\n if new_self==new_other:\n break\n count+=1\n new_other.advance_one()\n\n return count", "def subNDays(self, N):\n print self\n for i in range(N):\n self.yesterday()\n print self", "def days_per_month(leap=False):\n\n ndays = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if leap:\n ndays[1]+= 1\n return ndays", "def data_refresh_window_days(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"data_refresh_window_days\")", "def searched_per_day(df):\n return df.groupby([pd.TimeGrouper('D')]).apply(lambda row: \n len(row[row['action'] == 'searchResultPage']))", "def joined_days(self):\n return (timezone.now() - self.user.date_joined).days", "def get_num_messages_daily(self, date):\n cursor = 
self.get_cursor()\n end_date = date + relativedelta(days=1)\n query = 'SELECT count(*) AS num FROM messages WHERE created_on > %s AND created_on < %s'\n cursor.execute(query, (date, end_date))\n count = cursor.fetchall()\n return count[0]['num']", "def day_05_a() -> int:\n return 0", "def get_runing_days(start_date_list,expired_date_list):\n min_d = min(start_date_list)\n max_d = max(expired_date_list)\n num_days = (max_d-min_d).days +1\n return num_days", "def day_to_day(self):\n while True:\n yield 0", "def get_num_likes_daily(self, date, like):\n cursor = self.get_cursor()\n end_date = date + relativedelta(days=1)\n query = 'SELECT count(*) AS num ' \\\n 'FROM vote ' \\\n 'WHERE voted_on > %s AND voted_on < %s AND upvote = %s'\n cursor.execute(query, (date, end_date, like))\n count = cursor.fetchall()\n return count[0]['num']", "def numberOfEvents(self):\n raise NotImplementedError", "def main(args):\n \n y1, y2 = int(args[0]), int(args[1])\n print(f\"Aantal dagen in de periode [{y1},{y2}] = {num_of_days(y1, y2)}\")", "def DAYS(end_date, start_date):\n return (_make_datetime(end_date) - _make_datetime(start_date)).days", "def count(cls,action='login',days=1,param=None):\n dt = datetime.utcnow() - timedelta(days=days)\n q = SuiDau.all(keys_only=True).filter('action =',action).filter('date >=',dt)\n if param: q.filter('object =',param)\n return q.count()", "def compute_total_days(start, end):\n # Use the datetime module to subtract the dates (+1 if inclusive)\n return (end - start).days + 1", "def check_day_advance(self):\n days_ago = datetime.now().toordinal() - self.start_time.toordinal()\n if days_ago:\n # New day. Save data for the old day.\n self.save(days_ago = days_ago)\n self.start_time = datetime.now()\n # Reset all counters back to 0:00:00.\n for rd in self.row_detail_list:\n rd.time = '0:00:00'\n self.refresh_display()", "def daily_stats():\r\n count_total_each_user.delay()\r\n delete_non_activated_account.delay()", "def count_change(amount):\n \"*** YOUR CODE HERE ***\"\n\n return helper(1, amount)", "def day_06_b() -> int:\n return 0", "def increment_day(day, last_day_of_classes, result, agenda_type):\n if agenda_type == 'la':\n day += timedelta(days=7)\n return day\n\n if day.weekday() in [1, 2]:\n # Tuesday or Wednesday\n day += timedelta(days=1) # Tuesday or Wednesday\n else:\n day += timedelta(days=5) # Thursday\n if day > last_day_of_classes:\n for r in result:\n log(r)\n assert day <= last_day_of_classes, f'Day {day} falls after the \"\\\n flast day of classes {last_day_of_classes}'\n return day", "def add_days(self, number):\n self.days += number\n while self.days > self.months_list[self.month - 1]:\n if self.is_leap_year() and self.month == 2:\n self.days -= 29\n else:\n self.days -= self.months_list[self.month - 1]\n self.month += 1\n if self.month == 13:\n self.month = 1\n self.year += 1", "def interval_days(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interval_days\")", "def increment_daily_total(self, unique, property_id=None, value=1):\n key = (self.user_name, self.bucket_name, \"daily_event\", self.shard)\n property_id = property_id or _32_BYTE_FILLER\n column_id = \"\".join([\n self.id,\n property_id[0:16],\n pack_day(),\n property_id[16:32]])\n increment_counter(key, column_id=column_id, value=value)\n if unique:\n key = (\n self.user_name, \n self.bucket_name, \n \"daily_unique_event\", \n self.shard)\n increment_counter(key, column_id=column_id)", "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n if year1 <= year2 and month1 <= 
month2 and day1 <= day2:\n nOfDays = 0\n while nextDay < (year2, month2, day2):\n \n nOfDays += 1\n \n return nOfDays", "def number_of_tweets_per_day(df):\n \n var_date = pd.to_datetime(df['Date']) #creates a datetime variable from dates column\n df['Date'] = [i.date() for i in var_date] \n by_date = df.groupby('Date').count() #groups dataframe by date and counts number of tweets\n \n return by_date", "def get_num_replies_daily(self, date):\n cursor = self.get_cursor()\n end_date = date + relativedelta(days=1)\n query = 'SELECT count(*) AS num ' \\\n 'FROM replies INNER JOIN messages ON messages.mid = replies.replied_to' \\\n ' WHERE created_on > %s AND created_on < %s'\n cursor.execute(query, (date, end_date))\n count = cursor.fetchall()\n return count[0]['num']", "def get_daynums(self):\n self.df = self.df.sort_values(by=['ticker', 'date'])\n daynum = 0\n daynums = []\n tick = None\n for i in self.df.index:\n daynums.append(daynum)\n if tick is not None:\n if self.df.ticker.loc[i] == tick:\n daynum += 1\n else:\n daynum = 0\n tick = self.df.ticker.loc[i]\n self.df['daynum'] = daynums\n print('head check daynums: {} \\n\\n'.format(self.df[['daynum', 'ticker', 'date']].head()))", "def day(self) -> int:\r\n return self._day", "def weekday(self):\n\n return func.extract('dow', self.start_date) + 1", "def weekday(self):\n return 0", "def weekday(self):\n return 0", "def tickets(number, day, premium_seating):\n #fill in your code here. \n return 0.0", "def count(time):\n \n return len(events(time))", "def get_day():\n return handle_invalid_inputs(question_4, days)", "def UpdateCountsHandler(self):\n\n self.response.out.write('<br/><br/>Updating counts<br/>')\n MAX_COUNT = 200\n changesets = Changeset.all().order('-created_at').fetch(MAX_COUNT)\n\n date_of_first_changeset = changesets[0].created_at.date()\n date_of_last_changeset = changesets[-1].created_at.date()\n\n # if the same day for first and last write MAX_COUNT, skip next steps\n if date_of_last_changeset == date_of_first_changeset:\n update_count(date_of_first_changeset, MAX_COUNT)\n self.response.out.write('MAX_COUNT (%d) in this date (%s)<br/>' %\n (MAX_COUNT, str(date_of_first_changeset)) )\n return\n\n date_last = changesets[0].created_at.date()\n count_last = 0\n\n one_day = timedelta(days=1)\n\n for c in changesets:\n date_current = c.created_at.date()\n if date_current == date_last:\n count_last += 1\n else:\n if date_last - date_current > one_day:\n self.response.out.write('need to iterate between dates<br/>')\n d = date_current + one_day\n # iterate between dates, set counts to 0\n while d < date_last:\n self.response.out.write(str(d) + '<br/>')\n update_count(d, 0)\n d += one_day\n self.response.out.write(str(date_last)+': '+str(count_last)+'<br/>')\n is_new_entry = update_count(date_last, count_last)\n if not is_new_entry:\n self.response.out.write('not new entry<br/>')\n if not date_last == date_of_first_changeset:\n self.response.out.write(\n 'count for %s is already in datastore' % \n str(date_last)\n )\n return\n\n\n date_last = c.created_at.date()\n count_last = 1\n if c.created_at.date() == date_of_last_changeset:\n break\n \n self.response.out.write(str(changesets[0].created_at)+'<br/>')\n self.response.out.write(str(changesets[-1].created_at)+'<br/>')", "def day_change(\r\n self\r\n ):\r\n if self.r_Day_Night_variable.get() == 'Day':\r\n self.expos_compens = 0\r\n else:\r\n self.expos_compens = 25\r\n print(\"Changed to {} mode : Exposure compensation = 
{}\".format(self.r_Day_Night_variable.get(),self.expos_compens))", "def count():", "def checkio(from_date: date, to_date: date) -> int:\n result = 0\n while True:\n if from_date.weekday() == 5 or from_date.weekday() == 6:\n result += 1\n if from_date == to_date:\n break\n else:\n from_date += timedelta(days=1)\n return result", "def find_duration(discharge, enroll_date, discharge_date):\n #pass\n today = datetime.datetime.today()\n if discharge : #True\n return (discharge_date - enroll_date).days\n else:\n return (today - enroll_date).days", "def insert_day():\n analytics.insert_day(6)", "def calculate_seconds_in_days(days):\n return int(days * 86400)", "def add_num_days_col():\n days = []\n for i in range(len(df.index)):\n days.append(i + 1)\n days.reverse()\n df.insert(0, \"days\", days, True)", "def n_business_days(self, n=-2):\n\n business_days = 0\n calendar_days = 0 \n if n != 0:\n step = int(n/abs(n))\n while business_days != abs(n):\n calendar_days = calendar_days + step\n if business_day(self.time_stamp + timedelta(calendar_days)):\n business_days = business_days + 1\n return self.time_stamp + timedelta(calendar_days)\n return date", "def count_events(self, time_from='', time_to=''):\n count_from, count_up = self._validate_input(time_from, time_to)\n\n count = 0\n #assimption: the user more often is interested in newest data\n for i in reversed(self.events_by_seconds):\n if i[0] >= count_up:\n continue\n if i[0] < count_from:\n break\n count+=i[1]\n return count", "def day_num(x):\r\n if x==\"Sunday\":\r\n return 0\r\n elif x==\"Monday\":\r\n return 1\r\n elif x==\"Tuesday\":\r\n return 2\r\n elif x==\"Wednesday\":\r\n return 3\r\n elif x==\"Thursday\":\r\n return 4\r\n elif x==\"Friday\":\r\n return 5\r\n elif x==\"Saturday\":\r\n return 6", "def date_to_days(self, date):\n date = str_to_date(date)\n return (date-self.start_date).days", "def days(self) -> localedata.LocaleDataDict:\n return self._data['days']", "def date_in_days(date):\n delta = delta_from_now(date)\n return (delta.days) + delta.seconds / 86400", "def getMessageCountPerDay(self):\n\n # NOTE: We first filter distinct record ids for this filter set\n # and then use those record ids as additional filter parameter when we\n # perform the actual query for message count by date. This workaround\n # is (?) 
required to not get duplicate record rows that we can't\n # `distinct` away when using `annotate`, due to some crazy db joins.\n # TODO: Revise the workaround\n\n # Query distinct record ids for this filter set\n distinct_records = Record.objects.filter(\n self.getQuery()).distinct().values(\"id\")\n\n\n # Query the sum of message counts per day for above filtered\n # records, ordered by date in ascending order\n return Record.objects.filter(id__in=distinct_records).values(\n \"report__date_range_begin\").annotate(\n date=TruncDay(\"report__date_range_begin\"),\n cnt=Sum(\"count\")).values(\"date\", \"cnt\").order_by(\"date\")", "async def test_source_up_to_dateness(self):\n self.set_source_parameter(\"date\", \"2019-06-01\")\n response = await self.collect()\n self.assert_measurement(response, value=str((datetime.now() - datetime(2019, 6, 1)).days))", "def get_total_days(x):\n if not isinstance(x, dt.timedelta):\n raise TypeError(str(type(x)))\n\n return x.total_seconds() / 86400.", "def number_of_tweets_per_day(df):\n \n df['Date'] = pd.to_datetime(df['Date']) #converts date column to datetime\n df['Date'] = df['Date'].dt.date #extract only the date part of the datetime in the date column \n twitter_cnt=df.groupby('Date').count() #group the dataframe by unique dates and calculate the number of tweets in each day\n twitter_cnt.reset_index(inplace = True) #reset index\n twitter_cnt.set_index('Date', inplace = True) #set date column as index \n \n return twitter_cnt", "def new_day(self):\n self.previous_days.append(self.energy_debt)\n self.energy_debt = defaultdict(lambda: 0.0)\n\n #TODO: add the settelement mechanism here", "def test_delta_28_days(self):\n input_ = (datetime.date(1999, 2, 11), datetime.date(1999, 3, 11))\n output = self.expander._get_next_days(*input_)\n expected = 28\n actual = (output[-1] - output[0]).days\n\n self.assertEqual(expected, actual)", "def count_inside(self):\n time.sleep(2) #1\n self.count += 1", "def get_skipped_days(self) -> int:\n return self._skipped_days.get()", "def weekday_activity(frame):\n\n data = DataFrame()\n data['weekday'] = DatetimeIndex(frame.inserted).weekday\n counts = DataFrame(arange(7)*0)\n return (counts[0]+data.weekday.value_counts()).fillna(0)", "def day_07_a() -> int:\n return 0", "def calculate_daily_hourly_billable_counts(self):\n booking_length = self.schedule_end - self.schedule_start\n booking_length_hours_total = booking_length.days * 24 + booking_length.seconds / 60 / 60\n booking_days = int(booking_length_hours_total / 24)\n booking_hours = ceil(booking_length_hours_total % 24)\n if booking_hours * Decimal(self.vehicle.type.hourly_rate) >= self.vehicle.type.daily_rate:\n booking_days += 1\n booking_hours = 0\n return booking_days, booking_hours", "def days(self) -> Optional[pulumi.Input[int]]:\n warnings.warn(\"\"\"Deprecated in favor of duration\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"days is deprecated: Deprecated in favor of duration\"\"\")\n\n return pulumi.get(self, \"days\")", "def day_05_b() -> int:\n return 0" ]
[ "0.7565068", "0.7157228", "0.6552486", "0.6474552", "0.6474552", "0.64385206", "0.63302714", "0.62566036", "0.62210876", "0.61641073", "0.6144609", "0.6134258", "0.6126481", "0.60720074", "0.60308474", "0.59748036", "0.59373456", "0.59308636", "0.59216416", "0.5906378", "0.58842075", "0.58044857", "0.5781829", "0.571199", "0.571199", "0.5708095", "0.57074803", "0.56820655", "0.5678041", "0.5674412", "0.5666683", "0.56577414", "0.56499296", "0.56489795", "0.56337297", "0.5618371", "0.5603564", "0.5594575", "0.5591817", "0.55702174", "0.5563188", "0.5558056", "0.5550557", "0.553603", "0.5535759", "0.55235595", "0.55173606", "0.55029833", "0.5490335", "0.5485557", "0.54855156", "0.5468208", "0.54636955", "0.54582787", "0.54539114", "0.5452011", "0.5446504", "0.54427516", "0.54411256", "0.5438606", "0.5434438", "0.543426", "0.543424", "0.5431812", "0.54216623", "0.5421176", "0.5419534", "0.5417912", "0.5412476", "0.5412476", "0.5387473", "0.5386666", "0.53858656", "0.53821987", "0.5380186", "0.537257", "0.53712326", "0.536865", "0.5360129", "0.53540117", "0.5344851", "0.5343074", "0.5334966", "0.53314584", "0.53264993", "0.5311767", "0.53099304", "0.528459", "0.52821237", "0.5273018", "0.5272656", "0.52687585", "0.5259844", "0.52561617", "0.525226", "0.5247272", "0.52456355", "0.52291155", "0.5225469", "0.52208894" ]
0.6448241
5
Isolate (remove from the Grid) a given share of infected people for the sickness duration. Afterwards they need to be added to the Grid again as removed/dead/immune.
def execute(self, agent: Agent, state: SimState) -> None:
    if agent.is_quarantined():
        if agent.state() is AgentState.DEAD or agent.state() is AgentState.IMMUNE or agent.state() is AgentState.REMOVED:
            grid = agent.grid()
            for row in range(grid.get_size()):
                for col in range(grid.get_size()):
                    grid_pos = GridPos(np.uint(row), np.uint(col))
                    if not grid.is_occupied(grid_pos):
                        grid.set_agent(agent, grid_pos)
                        agent.set_pos(grid_pos)
                        agent.set_quarantined(False)
                        agent.grid().get_quarantinedAgents().remove(agent)
                        state.add_to_quarantined_count(-1)
                        return
    else:
        isolate_share = state.quarantine_share()  # Share of infected cells to isolate
        infected = state.infected_count()
        if agent.state() == AgentState.INFECTIVE and state.get_quarantined_count() < isolate_share * (infected + state.get_quarantined_count()):
            agent.set_quarantined(True)
            agent.grid().get_quarantinedAgents().append(agent)
            agent.grid().set_agent(None, agent.get_pos())
            agent.get_scheduler().update_gui_state(agent.get_pos(), AgentState.EMPTY)
            state.add_to_quarantined_count(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status=Target.PURGATORY,\n modified__lt=cutoff_time) # They didn't follow back in time\n\n for ingrate in ingrates:\n ingrate.status = Target.INGRATE\n ingrate.save()\n self.log.debug(\" => Unfollowed %s\" % ingrate.hunted.screen_name)\n try:\n self.api.destroy_friendship(ingrate.hunted)\n except Exception, e:\n print e\n return\n finally:\n pass\n #self.contact(ingrate)", "def remove_stuck(traj,size):\n from numpy import sqrt, where\n \n r_min = traj.groupby('particle').first()\n r_max = traj.groupby('particle').last()\n\n pos_columns = ['x','y']\n dist = r_min[pos_columns] - r_max[pos_columns]\n dist_eu = sqrt(dist['x']**2+dist['y']**2)\n\n index_remove = dist_eu.index[where(dist_eu < size)]\n \n traj_new = traj\n for i in range(len(index_remove)):\n traj_new = traj_new[(traj_new['particle'] != index_remove[i])]\n \n return traj_new", "def remove_partner(self, other_person,s):\n self.number_of_partners -= 1\n self.current_partners.remove(other_person.identifier)\n \n if self.number_of_partners == 0:\n #no partners left -> single\n s.number_of_singles += 1\n s.singles.add(self.identifier)", "def test_kyc_delete_legal_share_holder_natural(self):\n pass", "def sandwich(self):\n if self.game.rules[\"capture\"] == \"custodial_capture\":\n self.remove_self()\n if self.game.rules[\"trapping\"]:\n for trapped_neighbor in [neighbor for neighbor in self.get_neighbors() if neighbor.trapped and self.position in neighbor.get_sandwichers() and len(neighbor.get_sandwichers()) == 2]:\n trapped_neighbor.untrap()\n self.trap()", "def personal_group_sanitization(obj_count, obj_ceiling, group_id):\n\tif obj_count > obj_ceiling and random.random() < 0.66:\n\t\tpersonal_group_trimming_task.delay(group_id, obj_count)", "def test_remove_spawning_profile():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n i = Intersection(center, radius, speed_limit)\n\n default_driver = DriverProfile(\"Default\", 8, 2, 2, 0, 30, 3, 1)\n default_vehicle = VehicleProfile(\"Default\", 5, 15, 2, 2, 1000, 65)\n default_spawn = SpawningProfile(\"Default\", default_driver, default_vehicle)\n spawn2 = SpawningProfile(\"spawn2\", default_driver, default_vehicle)\n spawn_not_in_list = SpawningProfile(\"spawn3\", default_driver, default_vehicle)\n\n i.add_spawning_profile(default_spawn)\n i.add_spawning_profile(spawn2)\n\n assert len(i.get_spawning_profile_list()) == 2\n\n i.remove_spawning_profile(spawn_not_in_list)\n\n assert len(i.get_spawning_profile_list()) == 2\n\n i.remove_spawning_profile(spawn2)\n\n assert len(i.get_spawning_profile_list()) == 1\n\n i.remove_spawning_profile(default_spawn)\n\n assert len(i.get_spawning_profile_list()) == 0\n assert not i.get_spawning_profile_list()", "def delete_ss(self, sub):\n # Suppression of one random simple surface (satisfying both criteria)\n random.seed(42)\n surface = random.randint(0, len(self.surfaces)-1)\n print(self.surfaces[surface]['label'])\n\n bck_map = self.surfaces[surface]['aims_ss']\n for voxel in bck_map[0].keys():\n self.skel.setValue(0, voxel[0], voxel[1], voxel[2])\n\n bck_map_bottom = self.surfaces[surface]['aims_bottom']\n for voxel in bck_map_bottom[0].keys():\n self.skel.setValue(0, voxel[0], voxel[1], voxel[2])\n\n save_subject = sub\n return save_subject", "def cure(self, s):\n if self.disease_status 
== 1:\n s.number_of_symptomatic -= 1\n elif self.disease_status == 2:\n s.number_of_asymptomatic -= 1\n elif self.disease_status == 3:\n s.number_of_res_symp -= 1\n elif self.disease_status == 4:\n s.number_of_res_asymp -= 1\n if self.disease_status > 0:\n s.infected.remove(self.identifier)\n if self.disease_status > 2:\n s.resistant.remove(self.identifier)\n self.disease_status = 0\n self.time_since_infection = -1", "def remove_self(self):\n if self.game.rules[\"trapping\"]:\n [neighbor.untrap() for neighbor in self.get_neighbors() if neighbor.trapped and self in neighbor.get_sandwichers() and len(neighbor.get_sandwichers()) == 2]\n self.game.empty_square(self.position)\n self.position = None", "def post_solve_pig_wood(arbiter, space_obj, _):\n removed_pigs = []\n if arbiter.total_impulse.length > 700:\n pig_shape, wood_shape = arbiter.shapes\n for pig in total_pig:\n if pig_shape == pig.shape:\n pig.life -= 20\n\n if pig.life <= 0: #when life is 0\n removed_pigs.append(pig)\n for Each_pig in removed_pigs:\n space_obj.remove(Each_pig.shape, Each_pig.shape.body)\n total_pig.remove(Each_pig)", "def remove(self, pieces):\n for piece in pieces:\n self.board[piece.row][piece.col] = None\n if piece.get_player() is Player.white:\n self.num_white_pieces -= 1\n if piece.is_king():\n self.num_white_kings -= 1\n\n elif piece.get_player() is Player.black:\n self.num_black_pieces -= 1\n if piece.is_king():\n self.num_black_kings -= 1", "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def treatInfections(self, amount, disease):\r\n if disease in self.city.diseaseCounts:\r\n self.city.diseaseCounts[disease] -= amount\r\n disease.addCubes(amount)", "def remove_mass(self, *focal_elements):\n for focal in focal_elements:\n if focal[0] in self.focals:\n self.focals[focal[0]] -= focal[1]\n else:\n self.focals[focal[0]] = -focal[1]", "def track(self, paramsDict):\n\t\tlength = self.getEffLength()\n\n\t\tbunch = paramsDict[\"bunch\"]\n\n\t\tTPB.dipoleGeneralNoKickStripSeperateField(bunch,self.functionXPRigidity,self.functionXRigidity,self.functionYPRigidity,self.functionYRigidity,length)", "def test_remove_share(self):\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam\", status=400)\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam&source=gsiftp://source\", status=204)", "def cull(self) -> None:\n for player in self.players:\n to_remove = [creature for creature in player.battle_line if creature.damage_taken >= creature.power()]\n for creature in to_remove:\n player.battle_line.remove(creature)\n to_remove.destroyed(self, creature)", "def death_check(self):\r\n chance = random.random()\r\n if decimal.Decimal(chance) < decimal.Decimal(self.death_rate):\r\n if self.unique_id in head_of_household_list:\r\n try:\r\n head_of_household_list[self.hh_id] = 0\r\n except TypeError: # head of household migrated\r\n head_of_household_list[self.past_hh_id] = 0\r\n self.model.number_of_humans -= 1\r\n if self.unique_id in labor_list:\r\n labor_list.remove(self.unique_id)\r\n if self.work_status == 1:\r\n try:\r\n num_labor_list[self.hh_id] -= 1\r\n except TypeError:\r\n num_labor_list[self.past_hh_id] -= 1\r\n if self.unique_id in former_hoh_list:\r\n try:\r\n former_hoh_list[self.hh_id] = 0\r\n except:\r\n former_hoh_list[self.past_hh_id] = 0\r\n if [self.unique_id, self.hh_id] in single_male_list:\r\n 
single_male_list.remove([self.unique_id, self.hh_id])\r\n if self.unique_id in married_male_list:\r\n married_male_list.remove(self.unique_id)\r\n human_death_list.append(self.unique_id)\r\n try:\r\n hh_size_list[self.hh_id] -= 1\r\n except:\r\n hh_size_list[self.past_hh_id] -= 1\r\n human_demographic_structure_list[self.age_category] -= 1\r\n\r\n self.model.schedule.remove(self)\r\n if self in self.model.grid:\r\n self.model.grid.remove_agent(self)", "def remove_mass_unsafe(self, *focal_elements):\n for focal in focal_elements:\n if focal[0] in self.focals:\n self.focals[focal[0]] -= focal[1]\n else:\n self.focals[focal[0]] = -focal[1]", "def unset_interest(self, recipient, zone):\n self.send_message(\n self.channel,\n channels.ALL_STATE_SERVERS, # FIXME: Just the specific?\n msgtypes.UNSET_INTEREST,\n recipient,\n zone,\n )", "def susceptibleToInfected(self):\n\n #create a mask to sieve those uninfected out\n infected = self.space == 1\n\n # add extra boundaries\n expan1 = np.hstack((infected,np.zeros((self.space.shape[0],1))))\n expan1 = np.vstack((expan1,np.zeros((1,expan1.shape[1]))))\n expan1 = np.hstack((np.zeros((expan1.shape[0],1)),expan1))\n expan1 = np.vstack((np.zeros((1,expan1.shape[1])),expan1))\n\n # make the addition for how many infected are around each position\n expan2 = (expan1[:-2,:-2] + \n expan1[:-2,1:-1] + \n expan1[:-2,2:] + \n expan1[1:-1,2:] + \n expan1[2:,2:] + \n expan1[2:,1:-1] + \n expan1[2:,0:-2] + \n expan1[1:-1,0:-2])\n\n exposedToRisk = np.logical_and(expan2 > 0, self.space == 0)\n # initialize a random matrix where around infection_probability % of the values are True\n infect_prob_arr = np.random.rand(self.space.shape[0], self.space.shape[1]) < self.infection_probability\n # find the overlap between healthy and \n self.space[np.logical_and(exposedToRisk, infect_prob_arr)] = 1", "def move_unhappy(self):\n ###your code here###\n n_unhappy = 0\n for person in self.people:\n if person.is_unhappy():\n old_home=person.home\n person.home.occupant = None\n new_home = self.empty_homes.pop(random.randrange(len(self.empty_homes)))\n new_home.occupant = person\n person.home = new_home\n self.empty_homes.append(old_home)\n n_unhappy += 1\n\n return n_unhappy", "def delaround(i, j):\r\n global game_over, last_destroy\r\n # If the clicked tile is already revealed, do nothing.\r\n if tiles_cover[i, j].get_size() == (0, 0):\r\n return\r\n if tiles_cover[i, j] == tile_img_list[11]:\r\n return\r\n if tiles_cover[i, j] != tile_img_list[11]:\r\n tiles_cover[i, j] = pyg.Surface((0, 0)) # Reveal clicked tile\r\n last_destroy = (i, j)\r\n if tiles[i, j] == 9: # If mine is under clicked tile\r\n game_over = 1\r\n # If the current tile is blank, check all adjacent tiles\r\n cycle = [(i+dx, j+dy) for dx, dy in around]\r\n # Cycles through surrounding tiles\r\n for x, y in cycle:\r\n if show_destroy:\r\n pyg.event.pump()\r\n # If x or y coordinates are off the grid, skip this loop\r\n if x >= size_h or y >= size_w or x < 0 or y < 0:\r\n continue\r\n # If the current tile is already uncovered, skip loop\r\n if tiles_cover[x, y].get_size() == (0, 0):\r\n continue\r\n if tiles_cover[x, y] == tile_img_list[11]:\r\n continue\r\n # If clicked tile is a number tile, uncover it\r\n if tiles[i, j] == 0 and tiles[x, y] in range(1, 9):\r\n tiles_cover[x, y] = pyg.Surface((0, 0))\r\n last_destroy = (i, j)\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n # If clicked tile is blank, call function at the tile\r\n elif tiles[x, y] == 0: # abs(x-i)+abs(y-j) != 2\r\n if 
show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n delaround(x, y)", "def interaction_hole(self) -> None:\n x_dead_char = self.moving_character.x_obj\n y_dead_char = self.moving_character.y_obj\n void = ob.Void(x_dead_char, y_dead_char)\n # Replacing character by a Void\n self.grid.obj_list[self.moving_character] = void\n del self.grid.character_list[self.index_character]\n self.grid.character_just_died = True", "def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False", "def remove_life(self):\r\n if self.__lives < 0:\r\n self.__lives -= 1", "def take_remove_tile_turn(self, remove_tile_fxn):\n tilesAroundOpponents = []\n for player in self.board.players:\n if not player == self.player:\n x, y = player.x, player.y\n nearbyTiles = self.board.get_removable_tiles_around(x, y)\n tilesAroundOpponents.extend(nearbyTiles)\n tilesAroundOpponents = set(tilesAroundOpponents)\n x, y = self.player.x, self.player.y\n tilesAroundMe = set(self.board.get_removable_tiles_around(x, y)) # tiles around controlled player (me)\n safelyAroundOpponents = list(tilesAroundOpponents - tilesAroundMe) # tiles around opponents but not around me\n removableTiles = set(self.board.get_all_open_removable_tiles()) # all removable tiles\n safelyRemovable = list(removableTiles - tilesAroundMe) # all removable tiles except those around me\n try:\n if safelyAroundOpponents:\n target = random.choice(safelyAroundOpponents)\n elif tilesAroundOpponents: # likely that I'm next to other player. I'll have to remove a tile available for both of us\n target = random.choice(list(tilesAroundOpponents))\n else: # no open spots to remove around players can only happen if solid unremovable tiles exist\n target = random.choice(safelyRemovable)\n except IndexError: # this error will catch if last else statement possibly triggered it\n super(TileRemoveBot, self).take_remove_tile_turn(remove_tile_fxn)\n return\n remove_tile_fxn(target.x, target.y)", "def update_spaces_threatened(self):\n # The threatened spaces will always be it's corners\n current_row = self.position[0]\n current_column = self.position[1]\n corner1 = (current_row + 1 * self.direction, current_column - 1)\n corner2 = (current_row + 1 * self.direction, current_column + 1)\n current_spaces_threatened = [corner1, corner2]\n self.spaces_threatened = current_spaces_threatened\n update_threatening_king(self)", "def refresh_hand(self):\n if not self.stats['hand'] and self.stats['discard']:\n header_print('Adding a magic amulet and refreshing your group')\n if self.data['aces']:\n self.stats['discard'].append(self.data['aces'].pop())\n self.stats['hand'] = self.stats['discard'][:]\n self.stats['discard'] = []\n random.shuffle(self.stats['hand'])", "def grow_cluster(self):\n fate = np.random.rand(len(self.perimeter)) <= self.p\n new_cluster_pts = []\n new_dead_pts = []\n for pt, f in zip(self.perimeter, fate):\n if f:\n new_cluster_pts.append(pt)\n else:\n new_dead_pts.append(pt)\n self.perimeter = set()\n for pt in new_dead_pts:\n self.dead.add(pt)\n self.world[pt] = DEAD\n for pt in new_cluster_pts:\n self.cluster.add(pt)\n self.world[pt] = ALIVE\n self.add_perimeter(pt)", "def turn(grid):\n # Select infected people\n rows, cols = np.where(grid == 1)\n #print(f\"Infected at {rows}, {cols}\")\n # In random order, go through each infected\n idx = 
np.arange(len(rows))\n np.random.shuffle(idx)\n for i in idx:\n # Chance to heal\n if np.random.binomial(1, heal_rate):\n grid[rows[i], cols[i]] = -1\n # Chance to die\n if np.random.binomial(1, kill_rate):\n grid[rows[i], cols[i]] = 2\n # chance to infect\n else:\n infect(rows[i], cols[i])\n # Re-count everything\n add_tally(grid)\n return grid", "def remove_from_earth(sender, instance, **kwargs):\n\tgrplst = instance.groups_as_string.split(\", \")\n\tmail = instance.associated_user.email\n\t#loop over list\n\tfor grp in grplst:\n\t\trequests.delete(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be/members/{}\".format(grp,mail),auth=('api', settings.MAILGUN_API_KEY))", "def filter_out_flickers(total_buffer,index_disappeared):\n \n wait_for_disparition = False\n candidate_for_disparition = -1\n to_destroy = [] #List of 3D tuples (value,first_index,last_index) of segmented elements to remove from image\n beginning_index = -1\n premier_i =-1\n list_of_is =[]\n \n previous_index, osef2, osef3 = index_disappeared[0] #Get the index for the first event\n\n for i in range(0,len(index_disappeared)):\n index,diff,list_index = index_disappeared[i]\n #Remove an appearing and disappearing object from the experiment only if it\n #disappears in the next 5 (arbitrary) frames. If longer, conseder that something relevant\n #happened.\n \n if wait_for_disparition:\n #If sth appeared, destroy it if:\n #-It is the same object that disappears\n #-If the event is a disparition\n #-If it disappears in a time<time_thr\n size = np.count_nonzero(total_buffer[:,:,index-1]==list_index[0])\n if list_index[0]==candidate_for_disparition and diff<0 and size<500:\n to_destroy.append((list_index[0],beginning_index,index))\n list_of_is.append(premier_i)\n list_of_is.append(i)\n wait_for_disparition=False\n \n if diff>0: #Creation, do wait for disparition\n candidate_for_disparition = list_index[0]\n beginning_index = index\n wait_for_disparition =True\n premier_i = i\n \n return to_destroy,list_of_is", "def deal_hole(table):\n set_player_table_attributes(table)\n create_deck(table)\n\n for player in table.player_order:\n player.hole_cards.append(table.deck.pop(0))\n player.hole_cards.append(table.deck.pop(0))\n\n inc = 2\n # Set Inc for head to head\n if len(table.player_order) == 2:\n inc = 1\n table.current_bet = table.bb_amount\n action_time(table, inc)", "def intercept_e(self):\n for asteroid in range(len(self.asteroid_id_e) - 1, -1, -1):\n if self.distance(self.Main_Ship, self.asteroid_id_e[asteroid]) < (self.spaceship_radius + self.asteroid_r_e[asteroid]):\n self.del_asteroid_e(asteroid)\n self.lives -= 1", "def test_collisions(self):\n if self.fortress_exists:\n if self.smallhex.collide(self.ship):\n self.gameevents.add(\"collide\", \"small_hex\", \"ship\")\n else:\n self.smallhex.small_hex_flag = False\n for i, shell in enumerate(self.shell_list):\n if shell.collide(self.ship):\n self.gameevents.add(\"collide\", \"shell\", i)\n #need to treat this carefully - the mine can overlap the fortress, so we don't want to remove the same missile twice\n for i, missile in enumerate(self.missile_list):\n del_missile = False\n if self.fortress_exists and missile.collide(self.fortress):\n self.gameevents.add(\"collide\", \"missile_\" + str(i), \"fortress\")\n del_missile = True\n for j, mine in enumerate(self.mine_list):\n if missile.collide(mine) and not missile.collide(self.fortress):\n self.gameevents.add(\"collide\", \"missile_\" + str(i), \"mine_\" + str(j))\n del_missile = True\n if del_missile:\n del 
self.missile_list[i]\n for i, mine in enumerate(self.mine_list):\n if mine.collide(self.ship):\n self.gameevents.add(\"collide\", \"mine_\" + str(i), \"ship\")", "def remove_spikes(spt_dict, remove_dict, tolerance):\n spt_data = spt_dict['data']\n spt_remove = remove_dict['data']\n\n mn, mx = tolerance\n\n for t in spt_remove:\n spt_data = spt_data[(spt_data > (t + mx)) | (spt_data < (t + mn))]\n\n spt_ret = spt_dict.copy()\n spt_ret['data'] = spt_data\n return spt_ret", "def destroy(explosions,inkblots,hero,deaths,stats):\n explosion_hits_inkblot = pygame.sprite.groupcollide(explosions,inkblots,False,True,pygame.sprite.collide_mask)\n explosion_hits_hero = pygame.sprite.spritecollideany(hero,explosions,pygame.sprite.collide_mask)\n explosion_hits_death = pygame.sprite.groupcollide(explosions,deaths,False,True,pygame.sprite.collide_mask)\n \n if explosion_hits_inkblot:\n stats.inkblot_killed()\n if explosion_hits_death:\n stats.death_killed()\n if explosion_hits_hero != None:\n hero.alive = False", "def recall(self):\n for t in self.placed_tiles:\n row = self.placed_tiles[t][1][0]\n col = self.placed_tiles[t][1][1]\n # remove tiles from board\n self.board.board[row][col].letter = None\n # put tiles back on rack\n self.rack[t] = self.placed_tiles[t][0]", "def __call__(self,camp):\n if self.npc in camp.party:\n camp.assign_pilot_to_mecha(self.npc,None)\n camp.party.remove(self.npc)\n for mek in list(camp.party):\n if hasattr(mek,\"owner\") and mek.owner is self.npc:\n camp.party.remove(mek)", "def drop_off_task(obs):\n gripper_obs = obs[0][0][2:5]\n object_obs = torch.cat((obs[0][0][5:7], torch.tensor([1.0])))\n if (sum(gripper_obs == object_obs) == 3).item():\n print(f'Dropping the object off now')\n return True\n else:\n print(f'Picking up the object!')\n return False", "def test_remove_spawning_profile_from_intersection():\n tester = TestClass()\n intersections = tester.add_spawning_profile_to_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) != 0:\n assert True\n\n for spawn in i.get_spawning_profile_list():\n if spawn.get_spawning_profile_name() == 'Default':\n assert True\n break\n\n tester.delete_spawning_profile_from_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) == 0:\n assert True", "def prune(self, age_hours):\r\n pass", "def remove(self):\n\t\tcall_sdk_function('PrlShare_Remove', self.handle)", "def fix_half_inning(self, half_inning):\n outs = 0\n active_runners = []\n for atbat in half_inning:\n self.hold_runners(active_runners, atbat)\n\n active_runners = [r for r in atbat.runners\n if not r.out and r.end != 4]\n outs = atbat.outs", "def gimme_the_hole(required_hole_size):\n\tgood_object = spray(required_hole_size)\n\tmake_hole(required_hole_size, good_object)\n\treturn good_object", "def remove_circle(self, removing):\r\n t = turtle.Turtle()\r\n# For whatever number, either the user of the computer, is removing it will draw over the existing circles on the screen.\r\n for total_num in range(removing):\r\n t.speed(20)\r\n t.penup()\r\n t.goto(self.posn.x,self.posn.y)\r\n t.pendown()\r\n t.color(\"#696969\") # Changes the color to dark grey\r\n t.begin_fill()\r\n t.circle(30)\r\n t.end_fill()\r\n# Moves the turtle to the next row to start removing circle\r\n self.posn.x=self.posn.x+65\r\n if self.posn.x>=25:\r\n self.posn.y= self.posn.y-65\r\n self.posn.x=-300", "def disease_state_update(pop_matrix, mild_rec, sev_rec, pick_sick, thosp, quarantined=False):\n\n # get susceptible index value. 
Refer values of pop_matrix[:, 1] for more details\n qua_add = 0 # for normal situation\n if quarantined:\n qua_add = 7 # for quarantined situation\n\n # Move exposed to presymptomatic\n exposed_to_presym_ind = np.logical_and(\n pop_matrix[:, 1] == (1 + qua_add), # returns exposed/qua_exposed people\n pop_matrix[:, 3] >= np.floor(0.5 * pop_matrix[:, 2]) # returns people who passed half of incubation period\n )\n # update exposed->presymptomatic state\n pop_matrix[exposed_to_presym_ind, 1] = 2 + qua_add\n\n # Move presymptomatic to symptomatic but not yet severe.\n presymp_to_symp_ind = np.logical_and(\n pop_matrix[:, 1] == (2 + qua_add), # returns presymptomatic/qua_presymptomatic people\n pop_matrix[:, 3] >= pop_matrix[:, 2], # returns people for which incubation period is over\n # TODO: verify (Added by Ankit)\n pop_matrix[:, 4] == 0 # returns non-asymptomatic people. Asymptomatic people don't become symptomatic\n )\n # update presymptomatic->symptomatic state\n pop_matrix[presymp_to_symp_ind, 1] = 3 + qua_add\n # NOTE: Reset the day count when incubation period is over\n pop_matrix[presymp_to_symp_ind, 3] = 0\n\n # Move individuals with 6 days of symptoms to mild.\n symp_to_mild_ind = np.logical_and(\n pop_matrix[:, 1] == (3 + qua_add), # returns symptomatic/qua_symptomatic people\n pop_matrix[:, 3] == 6 # people on their 6th day after incubation period was over\n )\n pop_matrix[symp_to_mild_ind, 1] = 4 + qua_add\n\n # Move people with mild symptoms to recovered state\n mild_to_recovered_ind = np.logical_and(\n pop_matrix[:, 1] == (4 + qua_add), # returns mild/qua_mild people\n mild_rec # returns True if mild->recovered is valid based on probabilities defined in Lui et al. 2020\n )\n # update mild->recovered state\n pop_matrix[mild_to_recovered_ind, 1] = 6 + qua_add\n\n # Move people with severe symptoms to recovered state\n severe_to_recovered_ind = np.logical_and(\n pop_matrix[:, 1] == (5+qua_add), # returns severe/qua_severe people\n sev_rec # returns True if severe->recovered is valid based on probabilities defined in Cai et al.\n )\n # update severe->recovered state\n pop_matrix[severe_to_recovered_ind, 1] = 6 + qua_add\n\n # symptomatic to the “mild” or “severe”\n # Verity et al. hospitalisation.\n asp = np.array([0, .000408, .0104, .0343, .0425, .0816, .118, .166, .184])\n # Verity et al. 
corrected for Tuite\n aspc = np.array([.0101, .0209, .0410, .0642, .0721, .2173, .2483, .6921, .6987])\n\n age_bucket = 9 # age ranges from (0-10) to (90+)\n for buc in range(age_bucket):\n\n # Assign individuals with mild symptoms for six days, sick, between 10*sci and 10*sci+1 years old to severe\n # and count as hospitalized.\n\n severe_ind = np.logical_and.reduce((\n pop_matrix[:, 1] == 4 + qua_add, # returns mild/qua_mild people\n pop_matrix[:, 3] == 6, # 6th day after incubation period ??\n pick_sick < asp[buc], # Verity and colleagues data (low-risk)\n pop_matrix[:, 5] >= 10 * buc, # people age lower bound\n pop_matrix[:, 5] < (10 * buc + 10), # people age upper bound\n pop_matrix[:, 7] == 0 # people not having chronic disease\n ))\n # consider new severe people as hospitalized\n thosp += np.sum(severe_ind)\n # update mild->severe state\n pop_matrix[severe_ind, 1] = 5 + qua_add\n\n # Move individuals with Chronic diseases with 6 days of mild to severe.\n severe_chronic_ind = np.logical_and.reduce((\n pop_matrix[:, 1] == 4 + qua_add, # returns mild/qua_mild people\n pop_matrix[:, 3] == 6, # 6th day after incubation period ??\n pick_sick < aspc[buc], # Tuite and colleagues data (high-risk)\n pop_matrix[:, 5] >= (10 * buc), # people age lower bound\n pop_matrix[:, 5] < (10 * buc + 10), # people age upper bound\n pop_matrix[:, 7] == 1 # person having chronic disease\n ))\n # consider new severe people as hospitalized\n thosp += np.sum(severe_chronic_ind)\n # update mild->severe state\n pop_matrix[severe_chronic_ind, 1] = 5 + qua_add\n \n return pop_matrix, thosp", "def update(self):\n super().update()\n bulletKeys=list(Bullet.registeredBullets.keys())\n for currKey in bulletKeys:\n currBullet=Bullet.registeredBullets[currKey]\n if (self.hasCollidedWith(currBullet)):\n currBullet.delete()\n currBullet.entityThatCreatedMe.score+=self.pointValue\n self.shatter()\n break", "def execute_god(n: int) -> None:\n\n tile = game_state.remove_auction_tile(n)\n game_state.give_tiles_to_player(game_state.get_current_player(), [tile])\n game_state.remove_single_tiles_from_current_player([gi.INDEX_OF_GOD])\n game_state.advance_current_player()", "def remove_person_from_the_station(self, station: TelegramController.Station):\n\n if station.line_number in self.__stations_dict and station.station_number in self.__stations_dict[\n station.line_number]:\n if self.__stations_dict[station.line_number][station.station_number] == 1:\n del self.__stations_dict[station.line_number][station.station_number]\n if len(self.__stations_dict[station.line_number]) == 0:\n del self.__stations_dict[station.line_number]\n elif self.__stations_dict[station.line_number][station.station_number] > 1:\n self.__stations_dict[station.line_number][station.station_number] -= 1\n self.__message_sender.send_line(station.line_number, update_passengers=True)\n else:\n print(\"whoops an error, looks like the current station doesn't exit and there's no person waiting for it.\")", "def removeNeighbor(self, neighborID):", "def test_destroy_nas_share_by_pool(self):\n pass", "def eat_coin(self):\r\n self.app.coins.remove(self.grid_pos)\r\n self.current_score += 1", "def replace_person(self,p):\n p.age = 15\n p.days = 0\n \n self.age_group[4].remove(p.identifier)\n self.age_group[0].add(p.identifier)\n \n if np.random.random() < self.sexual_activity_high:\n p.sexual_activity = 1\n self.high_sexual_activity.add(p.identifier)\n else:\n p.sexual_activity = 0\n \n p.cure(self)\n \n #remove all partnerships where p is involved in\n for i,ps in 
enumerate(self.partnerships):\n if p.identifier in [ps.persons[0].identifier,ps.persons[1].identifier]:\n ps.remove(self)\n self.partnerships[i] = None\n #if deleting steady partnership\n if ps.type == 1:\n self.number_of_steady -= 1\n self.partnerships = list(filter(None,self.partnerships))", "def wipe(self, segments):\n self.firstCoords = None\n self.moveLead(MIDDLE, MIDDLE)\n for seg in self.segs:\n self.can.delete(seg.getGraphicObject())\n seg.rmGraphicObject()\n self.segs = segments\n self.redrawSegs()", "def infectedToRecovered(self):\n\n # initialize a random matrix where around recovery_probability % of the values are True\n recover_prob_arr = np.random.rand(self.space.shape[0],self.space.shape[1]) < self.recovery_probability\n # find the overlap between infected and above array and make those people recovered\n self.space[np.logical_and(self.space == 1, recover_prob_arr)] = 2", "def delete_poses():\n global saved_marshmallow_pose\n global saved_mouth_pose\n saved_marshmallow_pose = None\n saved_mouth_pose = None\n print \"Deleted poses\"", "def remove(self,s):\n \n p1, p2 = self.persons\n \n p1.remove_partner(p2,s)\n p2.remove_partner(p1,s)", "def remove_from_hand(self):\n pass", "def post_solve_pig_wood(arbiter, space, _):\n pigs_to_remove = []\n if arbiter.total_impulse.length > 700:\n pig_shape, wood_shape = arbiter.shapes\n for pig in pigs:\n if pig_shape == pig.shape:\n pig.life -= 20\n global score\n score += 10000\n if pig.life <= 0:\n pigs_to_remove.append(pig)\n for pig in pigs_to_remove:\n space.remove(pig.shape, pig.shape.body)\n pigs.remove(pig)", "def remove_player(self, seat_id):\n player_id = seat_id\n try:\n idx = self._seats.index(self._player_dict[player_id])\n self._seats[idx] = Player(0, stack=0, emptyplayer=True)\n del self._player_dict[player_id]\n self.emptyseats += 1\n except ValueError:\n pass", "def randomly_spawn_mothership(self) -> None:\n return", "def move_cloud(self):\n self.remove()\n self.min_x -= 1\n self.max_x -= 1\n self.update()", "def identifyThreats(self, env):\n # Find visibility cell\n cell_x = int(np.floor(self.ctr[0] / env.visibility_cell_width))\n cell_y = int(np.floor(self.ctr[1] / env.visibility_cell_width))\n # Fetch the visibility map\n v_map = np.asarray(env.visibility_cell[cell_x][cell_y].v_map)\n # Set the visibility probability cutoff\n cutoff = 0.44 # NEED TO ADJUST THIS VALUE TO VISIBILITY MAPS, MAYBE BASED ON MEAN VISIBILITY?\n # Find locations with visibility probability above the cutoff # ARE THERE ANY OTHER WAYS OF IDENTIFYING (DIFFERENT) OBJECTIVES?\n [x, y] = np.where(v_map > cutoff)\n # Plot these locations\n# plt.figure()\n# plt.scatter(x, y)\n# plt.title('Locations Exceeding Cutoff')\n# plt.show()\n # Return the visibility cell (x, y) values of threatening cells\n return [x, y]", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + 
\"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def make_updates(self, x):\n global inc\n for stone in self.players[x].stones_reference:\n temp = Thread(target=stone.move_stone)\n temp.daemon = True\n temp.start()\n if not stone.is_alive:\n self.players[x].stones_reference.remove(stone)\n if self.num_players == 1:\n self.maps[0].control_music(self.players[0].min_x)\n\n rand_x = randrange(1, 100)\n rand_x_2 = randrange(1, 150)\n rand_x_3 = randrange(1, 75)\n if inc % rand_x == 0:\n self.maps[x].clouds[0].move_cloud()\n if inc % rand_x_2 == 0:\n self.maps[x].clouds[1].move_cloud()\n if inc % rand_x_3 == 0:\n self.maps[x].clouds[2].move_cloud()\n inc += 1", "def removeSkeletalConstraints(self):\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def testRemove(self):\n\n numIns = randint(70,200)\n\n for i in xrange(numIns):\n self.s.insert(i, None)\n for i in xrange(numIns):\n self.s.remove(i)", "def pack_has_eaten(self, elk_to_eat):\n # Remove elk\n for elk in elk_to_eat:\n self.model.grid.remove_agent(elk)\n self.model.schedule.remove(elk)\n logging.debug('Pack has eated, disbanding pack with size {}'.format(\n len(self.wolves))\n )\n for wolf in self.wolves:\n wolf.energy += self.model.wolf_gain_from_food*len(elk_to_eat)\n wolf.kills += 1\n self.remove_from_pack(wolf)\n # Remove pack from scheduler\n self.model.grid.remove_agent(self)\n self.model.schedule.remove(self)", "def delete_inaccessible_buildings(self):\n def num_adj_buildings(pos):\n \"\"\"Helper function that returns number of immediately adjacent commercial buildings\"\"\"\n neighborhood = self.environment.grid.get_neighborhood(pos, moore=False, include_center=False)\n\n adj_num = 0\n num_cells = 0\n for cell in neighborhood:\n num_cells += 1\n # Check contents of each neighbor\n if not self.environment.grid.is_cell_empty(cell):\n contents = self.environment.grid.get_cell_list_contents(cell)\n for agent in contents:\n if type(agent) is Building or type(agent) is CommercialBuilding:\n # check that cell is not empty and contains a commercial building in it\n adj_num += 1\n break\n return adj_num, num_cells\n\n def 
is_inacessible(cell):\n \"\"\"Helper function, converts to boolean\"\"\"\n adj, count = num_adj_buildings(cell)\n return adj == count\n\n # Main Function\n for building in self.environment.agents['residences']:\n if is_inacessible(building.pos):\n self.environment.grid.remove_agent(building)\n self.environment.agents['residences'].remove(building)\n\n for building in self.environment.agents['commercial_buildings']:\n if is_inacessible(building.pos):\n self.environment.grid.remove_agent(building)\n self.environment.agents['commercial_buildings'].remove(building)", "def kill(self):\n for piece in self.board.pieces:\n piece.destroyed = True", "def update_spaces_threatened(self):\n self.spaces_threatened = self.all_possible_moves(return_spaces_threatened=True)\n update_threatening_king(self)", "def update_spaces_threatened(self):\n self.spaces_threatened = self.all_possible_moves(return_spaces_threatened=True)\n update_threatening_king(self)", "def fjernSang(self, sang):\r\n self._sanger.remove(sang)", "def cleaningStayPoint( self, StayPoints):\n\n for i in range(len(StayPoints)):\n x1, y1 = StayPoints[i]\n for j in range(len(StayPoints)):\n if i>=j:\n continue\n x2, y2 = StayPoints[j]\n dist = (x1 - x2)**2 + (y1 - y2)**2\n if dist < THRESHOLD_DIST:\n # merge this ! \n avg_x = (x1+x2)/2\n avg_y = (y1+y2)/2\n\n StayPoints.remove((x1,y1))\n StayPoints.remove((x2,y2))\n StayPoints.append( (avg_x,avg_y))\n return True\n return False", "def _unhide_if_applicable(self, boxes_contours, used_boxes):\n unused_boxes = boxes_contours[used_boxes == False]\n if len(unused_boxes) > 0:\n hidden = [detection for detection in self.detections if detection.is_hidden]\n for detection in hidden:\n rd = detection.relative_distance_with(unused_boxes)\n min_rd = rd.min()\n argmin_rd = rd.argmin()\n ix = np.where(np.all(boxes_contours == unused_boxes[argmin_rd], axis=1))[0][0]\n if min_rd < 1.5 * self.dist_thresh:\n detection.unhide(boxes_contours[ix])\n used_boxes[ix] = True", "def eat(self, x, y):\n for dot in self.top_row:\n if (dot.x in range(x-self.EAT_DIST, x+self.EAT_DIST) and\n dot.y in range(y-self.EAT_DIST, y+self.EAT_DIST)):\n self.top_row.remove(dot)\n for dot in self.left_col:\n if (dot.x in range(x-self.EAT_DIST, x+self.EAT_DIST) and\n dot.y in range(y-self.EAT_DIST, y+self.EAT_DIST)):\n self.left_col.remove(dot)\n for dot in self.bottom_row:\n if (dot.x in range(x-self.EAT_DIST, x+self.EAT_DIST) and\n dot.y in range(y-self.EAT_DIST, y+self.EAT_DIST)):\n self.bottom_row.remove(dot)\n for dot in self.right_col:\n if (dot.x in range(x-self.EAT_DIST, x+self.EAT_DIST) and\n dot.y in range(y-self.EAT_DIST, y+self.EAT_DIST)):\n self.right_col.remove(dot)", "def mine_remove(x, y):\n click.echo('Removed mine at %s,%s' % (x, y))", "def pruning(self):\n data = self.data.copy()\n for d in self.data:\n # cascade purning method. 
Inspired from \"Efficient Computation of Group Skyline Queries on MapReduce (FCU)\"\n if d in data:\n pastart = [self.drange[1] if i+self.radius>self.drange[1] else i+self.radius for i in d.getLocationMax()]\n pamax = [self.drange[1] for j in range(self.dim)]\n # prune data points that are obviously dominated by current data point\n pruned = (self.index.intersection(tuple(pastart+pamax),objects=True))\n for p in pruned:\n if p.object in data:\n data.remove(p.object)\n self.pruned = data", "def test_destroy_nas_share_by_nas(self):\n pass", "def checkAmountOfNeighbors(self):\n cellsToDelete = []\n for cell in self.cells:\n if(cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or (cell.numOfNeighbor == 2 and cell.dead == True)):\n cellsToDelete.append(cell)\n elif(cell.numOfNeighbor == 3 and cell.dead == True):\n cell.makeAlive()\n cell.numOfNeighbor = 0\n\n self.removeCells(cellsToDelete)", "def delete_leader(self):", "def make_clips(self):\n\n average_messege_count, streamer_messeges_data = self.__do_analysis()\n\n clipworthy_clips = []\n\n #add clipworthy clips\n for entry in streamer_messeges_data:\n if((entry['messeges_count']*entry['messeges_count']) > (average_messege_count*1.8)):\n clipworthy_clips.append(entry)\n\n #combine clips that are next to one another in time\n clip_number = 0\n while(True):\n #print('clip_number = ' + str(clip_number) +' , length of cliparr = ' + str(len(clipworthy_clips)))\n if(clip_number >= (len(clipworthy_clips))-1):\n #at end of clips\n break\n\n if (clipworthy_clips[clip_number]['end_time']==clipworthy_clips[clip_number+1]['start_time']):\n #duplicate clip detected\n #print('dublicate clip detected for clip ' + str(clip_number))\n clipworthy_clips[clip_number]['end_time']=clipworthy_clips[clip_number+1]['end_time']\n #print('cliparr length before ridding: ' + str(len(clipworthy_clips)))\n clipworthy_clips.remove(clipworthy_clips[clip_number+1])\n #print('cliparr length after ridding: ' + str(len(clipworthy_clips)))\n #print('')\n else:\n clip_number = clip_number + 1\n\n\n print('clipworthy clips will now be made')\n clipSlicer = ClipSlicer(clipworthy_clips)\n clipSlicer.make_clips()\n\n print(\"clipworthy clips for streamer \"+ self.streamer + \" have been made\")", "def _remove_walls(current: GridCell, choice: GridCell):\n if choice.x > current.x:\n current.walls[1] = False\n choice.walls[0] = False\n elif choice.x < current.x:\n current.walls[0] = False\n choice.walls[1] = False\n elif choice.y > current.y:\n current.walls[3] = False\n choice.walls[2] = False\n elif choice.y < current.y:\n current.walls[2] = False\n choice.walls[3] = False", "def process_tile_reveal(self, tile_reveal_result):\r\n\r\n self.num_of_hidden_non_mines_tiles -= tile_reveal_result.non_mines_uncovered\r\n if tile_reveal_result.hit_mine:\r\n self.lose_game(tile_reveal_result.mine_tiles)\r\n elif self.num_of_hidden_non_mines_tiles == 0:\r\n self.win_game()", "def clear_restriction_details(self):\n\t\n\t\tif getattr(self,'new_seq_win_objs',None):\n\t\t\tfor obj in self.new_seq_win_objs.keys():\n\t\t\t\tself.seqframe.delete(obj)\n\t\t\tself.new_seq_win_objs={}\n\t\t\tself.donepos={}\n\t\t\t#\n\t\t\tfor obj in self.temp_objs.keys():\n\t\t\t\tself.seqframe.delete(obj)\n\t\t\tself.temp_objs={}\n\t\t\tself.temp_sites={}\n\t\t\tself.seqframe.delete('labelrect')\n\t\t\tself.seqframe.delete('line')\n\t\t\tself.seqframe.delete('templabelrect')\n\t\t\tself.seqframe.delete('templine')\n\t\t\t#also clear the sites list - this is used in tidying and rendering lines/rects\n\n\t\treturn", "def 
reveal_mines(self, row, col):\n for loc in self.mineboard.mine_places:\n if loc != [row, col]:\n i, j = loc[0], loc[1]\n if self.mineboard.gameboard[i][j] == 'F':\n continue\n self.canvas.delete(self.cells[i][j])\n self.canvas.create_image(\n 2+j*CELLWIDTH, 2+i*CELLWIDTH, image=MINE, anchor='nw')", "def kick_pair(self, stellar_pair, dt):\n\n self.pair = stellar_pair\n self.pos_com = stellar_pair.center_of_mass()\n self.vel_com = stellar_pair.center_of_mass_velocity()\n self.dt = dt\n self.calculate_useful_stuff()\n\n if self.mass_accretion: self.kick_from_accretion()\n if self.tides: self.kick_from_eccentric_tides()", "def _remove(updated_pending_requests):\n remove_member_from_pending_query = Query.room_request(roomname, \"\", updated_pending_requests)\n self.db.execute_query(remove_member_from_pending_query)", "def find_spare_alter(participant_data,\n true_distro_by_round,\n network_topology_name,\n additional_names_count,\n only_non_neighbors=False,\n divergence_type=\"JS\",\n memory_length=5,\n graph=False):\n\n participant_differences = {}\n\n net = nb.net(network_topology_name)\n\n # We start by going through each participant\n for participant, data in participant_data.items():\n # First we look at the overall divergence without unstructured names\n div_no_unstructured_by_round = []\n\n distro_by_round_no_unstructured = impose_limited_memory(data[\"distro_by_round_no_unstructured\"],\n memory_length=memory_length)\n for game_round in range(1,26):\n\n # a new list of names needs to be truncated to induce some memory\n seen_distro_with_no_unstructured, true_distro = create_continuity_and_probabilities(\n distro_by_round_no_unstructured[game_round], true_distro_by_round[game_round])\n\n # divergence between real and distro with no additional names\n if divergence_type == \"JS\":\n div_no_unstructured_by_round.append(JS(seen_distro_with_no_unstructured, true_distro))\n else:\n div_no_unstructured_by_round.append(KL(seen_distro_with_no_unstructured, true_distro))\n\n\n # Then we calculate the KL divergence for the real rounds with unstructured\n div_with_unstructured_by_round = []\n\n distro_by_round_unstructured = impose_limited_memory(data[\"distro_by_round_unstructured\"],\n memory_length = memory_length)\n\n for game_round in range(1,26):\n\n # a new list of names needs to be truncated to induce some memory\n seen_distro_with_unstructured, true_distro_2 = create_continuity_and_probabilities(\n distro_by_round_unstructured[game_round],\n true_distro_by_round[game_round])\n\n # KL divergence between real and distro with additional name\n if divergence_type == \"JS\":\n div_with_unstructured_by_round.append(JS(seen_distro_with_unstructured, true_distro_2))\n else:\n div_with_unstructured_by_round.append(KL(seen_distro_with_unstructured, true_distro_2))\n\n\n net = nb.net(network_topology_name)\n\n # here we get the fixed list of the participant's network alters\n alters = list(net.network.neighbors(participant))\n\n names_to_add_by_round = {}\n\n for round, round_partner in data[\"actual_alters\"].items():\n # We randomly select additional alters until we have the right number that aren't the one the participant actually just played against. This is complicated in the case of the small world networks, which don't have a constant degree distribution. 
If there aren't enough alters for the required number of exposures, we get more from an alter's alter.\n list_of_random_alters = []\n new_alters = list(alters)\n new_alters.remove(round_partner)\n if len(new_alters) >= additional_names_count:\n list_of_random_alters = random.sample(new_alters, additional_names_count)\n else:\n more_needed = additional_names_count - len(new_alters)\n next_alter = 0\n alters_to_add = new_alters\n while more_needed > 0:\n alters_alters = list(net.network.neighbors(new_alters[next_alter]))\n try:\n alters_alters.remove(round_alter)\n except:\n pass\n try:\n alters_alters.remove(participant)\n except:\n pass\n alters_to_add.extend(alters_alters)\n next_alter +=1\n more_needed -= len(alters_to_add)\n # It's possible that we now have too many names, so we truncate the list\n list_of_random_alters = alters_to_add[:additional_names_count]\n assert len(list_of_random_alters) == additional_names_count, (list_of_random_alters, additional_names_count)\n\n\n\n # We get the name the random alter played and add it to the list\n other_names = []\n for other_alter in list_of_random_alters:\n other_names.append(participant_data[other_alter][\"names_played\"][round-1])\n names_to_add_by_round[round] = other_names\n\n\n # We go through round by round and add in the newly visible names\n simulated_distro_by_round = {}\n\n # now we add to distribution with out unstructured\n for game_round in range(1,26):\n no_unstructured = list(data[\"distro_by_round_no_unstructured\"][game_round])\n no_unstructured.extend(names_to_add_by_round[game_round])\n\n simulated_distro_by_round[game_round] = no_unstructured\n\n # That new list of names needs to be truncated to induce some memory\n distro_by_round_with_random_others = impose_limited_memory(simulated_distro_by_round,\n memory_length=memory_length)\n\n div_with_random_by_round = []\n for game_round in range(1,26):\n\n seen_distro_random_others, true_distro = create_continuity_and_probabilities(\n distro_by_round_with_random_others[game_round],\n true_distro_by_round[game_round])\n\n # Then we calculate the divergence from distro with the additional names to true distro\n if divergence_type == \"JS\":\n div_with_random_others = JS(seen_distro_random_others, true_distro)\n else:\n div_with_random_others = KL(seen_distro_random_others, true_distro)\n div_with_random_by_round.append(div_with_random_others)\n\n # now that we have the divergence to the true distro from simulated distro, we compare it\n # to the divergence to the true distro from the real additional name distro. 
A positive\n # difference as calculated means the additional name version carries more information\n # (because it is closer to the true)\n\n game_diffs = [i-j for i, j in zip(div_with_random_by_round,div_with_unstructured_by_round)]\n\n divergence_ratio = [1 - actual/total if total !=0 else 0 for actual, total in zip(div_with_unstructured_by_round, div_no_unstructured_by_round)]\n\n #print(\"comparison:\", div_with_random_by_round)\n #print(\"base_diff:\", div_no_unstructured_by_round)\n #print(\"ratio:\", divergence_ratio)\n\n participant_differences[participant] = (game_diffs, list_of_random_alters, div_no_unstructured_by_round, divergence_ratio)\n\n return participant_differences", "def nim(heaps: List[int], game_type: str):\n\n print(game_type, heaps, end=' ')\n\n is_misere = game_type == MISERE\n\n is_near_endgame = False\n count_non_0_1 = sum(1 for x in heaps if x > 1)\n is_near_endgame = (count_non_0_1 <= 1)\n\n # nim sum will give the correct end-game move for normal play but\n # misere requires the last move be forced onto the opponent\n if is_misere and is_near_endgame:\n moves_left = sum(1 for x in heaps if x > 0)\n is_odd = (moves_left % 2 == 1)\n sizeof_max = max(heaps)\n index_of_max = heaps.index(sizeof_max)\n\n if sizeof_max == 1 and is_odd:\n return \"You will lose :(\"\n\n # reduce the game to an odd number of 1's\n return index_of_max, sizeof_max - int(is_odd)\n\n nim_sum = functools.reduce(lambda x, y: x ^ y, heaps)\n if nim_sum == 0:\n return \"You will lose :(\"\n\n # Calc which move to make\n for index, heap in enumerate(heaps):\n target_size = heap ^ nim_sum\n if target_size < heap:\n amount_to_remove = heap - target_size\n return index, amount_to_remove", "def restart():\n for pig in pigs.copy():\n space.remove(pig.shape, pig.shape.body)\n pigs.remove(pig)\n for bird in birds.copy():\n space.remove(bird.shape, bird.shape.body)\n birds.remove(bird)\n for column in columns.copy():\n space.remove(column.shape, column.shape.body)\n columns.remove(column)\n for beam in beams.copy():\n space.remove(beam.shape, beam.shape.body)\n beams.remove(beam)", "async def _remove(self, ctx, points: int, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(\n lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(\n lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(\n lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(\n lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await 
asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} was not found. Please add them first using points member add\"\n \" <discord name or Nickname>\".format(x.display_name))\n else:\n self.db[server.id][x.id][\"Lifetime Loss\"] += points\n self.db[server.id][x.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, x.name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please register first using points member add\"\n \" <Discord name or nickname>\".format(namea))\n return\n self.db[server.id][name.id][\"Lifetime Loss\"] += points\n self.db[server.id][name.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, name.name))\n self.save_db()", "def clear_social_distancing_func(self):\n self.social_distancing_func = None\n for agent in self.grid.get_all_cell_contents():\n agent.social_distancing_func = None", "def mut_individual(individual, pexist):\n \n network = individual.network\n for i in network.index.values:\n age = network.loc[i,'age']\n if random.random() < AGEDEP(age, pexist):\n if network.loc[i,'in'] == 1:\n network.loc[i, :] = 0\n network.loc[:, i] = 0\n \n if network.loc[i,'in'] == 0:\n network.loc[i,'in'] = 1\n network.loc[i,'age'] = 1\n for j in network.columns.values[2:]:\n if random.random() < 0.1 and i != j:\n network.loc[i,j] = 1\n network.loc[j,i] = network.at[i,j]\n \n relevant = network.loc[network['in']==1]\n for _ in range(10):\n i = random.choice(relevant.index.values)\n j = random.choice(relevant.columns.values[2:])\n network.loc[i,j] = abs(network.at[i,j]-1)\n network.loc[j,i] = network.at[i,j]\n \n if network.loc[i][1:].sum() == 0:\n network.loc[i,'in'] = 0 \n network.loc[i,'age'] = 0\n \n individual.network = network\n individual.age = 1\n return individual,", "def removing_ingridients(self, value):\n self._removing_ingridients = value\n if value:\n self._adding_ingridients = not value\n self._adding_meals = not value", "def animal_dies(self):\n for species, animals in self.fauna_list.items():\n for animal in animals:\n if animal.probability_of_death:\n self.remove_animal(animal)", "def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))", "def trigger_violence(self):\n # First time offender get registered in the system and changes category into an Aggressor and a Victim\n if self.assaulted == 0:\n if self.stress > self.random.random():\n self.category = 'aggressor'\n self.assaulted += 1\n self.spouse.category = 'victim'\n self.spouse.got_attacked += 1\n\n # Second-time offender, checks to see if it is a recidivist.\n elif self.stress > self.random.random():\n self.assaulted += 1\n self.spouse.got_attacked += 1" ]
[ "0.5290711", "0.5255218", "0.5245789", "0.51121676", "0.50786626", "0.50382346", "0.50287575", "0.49950922", "0.4993932", "0.49557725", "0.4935115", "0.49344", "0.4929344", "0.4913823", "0.4904666", "0.49032527", "0.4873626", "0.487273", "0.48126963", "0.48005226", "0.47989222", "0.47786367", "0.47760165", "0.47643363", "0.47625166", "0.47375506", "0.47186613", "0.47140265", "0.47026393", "0.46985075", "0.4695604", "0.4692345", "0.46867716", "0.4684959", "0.46670774", "0.4664182", "0.46558902", "0.46499535", "0.46490496", "0.46489275", "0.4643379", "0.46400452", "0.4621117", "0.46111888", "0.46070772", "0.4606569", "0.4603728", "0.45968613", "0.4596382", "0.45840833", "0.45562813", "0.45408097", "0.4540514", "0.45387802", "0.45385504", "0.45365995", "0.45267624", "0.4523406", "0.4521238", "0.4520846", "0.45169756", "0.45155758", "0.4514225", "0.45104092", "0.45011362", "0.44932806", "0.44900537", "0.4489703", "0.4488297", "0.44840506", "0.44783673", "0.44777015", "0.44734654", "0.44697103", "0.44697103", "0.44674706", "0.4460009", "0.44592905", "0.4458557", "0.44541445", "0.44534758", "0.4452695", "0.44509357", "0.44479647", "0.44446868", "0.44442102", "0.44396478", "0.44343284", "0.44303867", "0.44273904", "0.44238448", "0.44230598", "0.44226974", "0.44222444", "0.44216946", "0.44209638", "0.44195613", "0.44165528", "0.44143727", "0.4409107", "0.4405903" ]
0.0
-1
Return the parsed contents of the config file.
def get_config(): return json.loads(CONFIG_FILE.read_text())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_config(self):\n config = configparser.ConfigParser()\n config.read(self.configfile)\n return config", "def get(self):\n config = self.user_file.parseString(self.content)\n return config", "def get(self):\n if self.file:\n self._read()\n config = self.client_file.parseString(self.content)\n return config", "def _parseConfigFile(self):\n\n configFile = self._configFile()\n\n configs = configparser.SafeConfigParser()\n try:\n with open(configFile, 'r', encoding='utf-8') as fh:\n try:\n configs.readfp(fh)\n return configs\n except configparser.Error:\n log(ERROR, traceback.format_exc())\n return None\n except IOError:\n log(DEBUG, \"Error: Could not read from config file {0}\\n\".format(configFile))\n return None", "def get_config():\n with open(CONFIG_PATH) as config_file:\n data = json.load(config_file)\n return data", "def read_config():\n with open(CONFIG_PATH) as config_file:\n return json.load(config_file)", "def _get_config(self, unit, filename):\n file_contents = unit.file_contents(filename)\n config = ConfigParser.ConfigParser()\n config.readfp(io.StringIO(file_contents))\n return config", "def read_config(self, config_filename):", "def config():\n with open(config_path) as config_file:\n data = json.load(config_file)\n return data", "def parse_config(self):\n # TODO: parse config file\n pass", "def parse(self):\n\n if exists(self.filepath):\n content = open(self.filepath).read().decode(charset)\n else:\n content = \"\"\n\n try:\n config = toml.loads(content)\n except toml.TomlSyntaxError:\n raise ConfigSyntaxError\n\n return config", "def get_config() -> configparser.ConfigParser:\n config = configparser.ConfigParser()\n config.read(CONFIG_FILE)\n\n return config", "def _read_config_file(self):\r\n\r\n try:\r\n with open(self.config, 'r') as f:\r\n config_data = json.load(f)\r\n except FileNotFoundError:\r\n config_data = {}\r\n\r\n return config_data", "def get_config(file_path):\n config = configparser.ConfigParser()\n config.read(file_path)\n return config", "def read_config(config_file):\n config = configparser.ConfigParser()\n config.read(config_file)\n return config", "def get_config(configfile):\n cfg = ConfigParser.ConfigParser()\n cfg.read(configfile)\n return cfg", "def read_config(contents):\n file_obj = io.StringIO(contents)\n config = six.moves.configparser.ConfigParser()\n config.readfp(file_obj)\n return config", "def read_configuration (self):\n\t\tself.config.read(self._configfile)", "def get_config():\n handle = open(\"config.json\", \"r\")\n raw_json = handle.read()\n handle.close()\n return json.loads(raw_json)", "def get(self):\n return util.getJSONFile(CONFIG_PATH)", "def get(self):\n _config_file = None\n _parsed_config = configparser.ConfigParser()\n try:\n _config_file = open(self._config_path, \"r\")\n except OSError as e:\n logger.error(str(e))\n Utils.exiter(1)\n try:\n _parsed_config.read_file(_config_file)\n except configparser.ParsingError as e:\n logger.error(str(e))\n Utils.exiter(1)\n\n _defaults = _parsed_config.defaults()\n _t = {}\n for (_k, _v) in _defaults:\n _t[self._format_keys(_k)] = self._format_values(_v)\n self.config[self._format_keys(\"defaults\")] = _t\n\n for _s in _parsed_config.sections():\n _t = {}\n for (_k, _v) in _parsed_config.items(_s):\n _t[self._format_keys(_k)] = self._format_values(_v)\n self.config[self._format_keys(_s)] = _t\n logger.debug(f\"Got config: {json.dumps(self.config, indent=2)}\")\n return self.config", "def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if 
isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}", "def read_config():\n config = configparser.ConfigParser()\n if not os.path.exists(\"config.cfg\"):\n raise FileNotFoundError(\"configuration file (config.cfg) not found!\")\n config.read(\"config.cfg\")\n return config", "def load(self):\n with open(self.conf_fname, \"r\") as fd:\n config = json.load(fd)\n \n return config", "def get_config(config_path='config.ini'):\n config = configparser.ConfigParser()\n config.read(config_path)\n return config", "def get_config(self):\r\n if not os.path.exists(self.config_file):\r\n return None\r\n return json.loads(file(self.config_file).read())", "def parsed_file(config_file):\n parser = ConfigParser(allow_no_value=True)\n parser.read_file(config_file)\n return parser", "def readConfig(file=\"dispatcher.conf\"):\n\n parser = configparser.ConfigParser()\n parser.read(file)\n machines = parser.items(\"MACHINES\")\n commands = parser.items(\"COMMANDS\")\n\n return machines, commands", "def get_config(full_path):\n config = configparser.RawConfigParser()\n config.read(full_path)\n print(\"Found these configs:\")\n for config_name in config.sections():\n print('-', config_name)\n return config", "def read_config(self) -> dict:\n\n if self.valid_is_json():\n with open(self.file_name, 'r') as file:\n return json_loads(file.read())\n elif self.valid_is_yaml():\n with open(self.file_name, 'r') as file:\n return yaml_loads(file.read(), Loader=Loader)\n else:\n raise Exception('Invalid config file')", "def parse_config(path):\n #Open the file\n f = open(path)\n section = None\n\n #for each line in file:\n for line in f:\n #Get rid of extra spaces and carridge-returns\n line = line.rstrip('\\r\\n')\n\n #If there is a comment on the line, get rid of everything after the comment symbol and trim whitespace\n #Example: hi there #This is a comment\n if \"#\" in line:\n line, comment = line.split(\"#\", 1)\n line = line.strip()\n\n #If there is a section header on the line, figure out what it's name is, and save it\n if \"[\" in line:\n #Example: [StartupMods]\n section = line.split(\"[\", 1)[1].split(\"]\", 1)[0]\n parsed_config[section] = list()\n\n #If there is no section header, than the line must contian data, so save it under the current section\n else:\n if line is not \"\":\n parsed_config[section].append(line)\n\n #Message the system\n logging.info(\"Finished parsing \" + path)\n return parsed_config", "def load_config(self):\n\n with open(os.path.expanduser(self.config_filename), 'r') as f:\n lines = f.readlines()\n\n _usable = lambda l: not(l.startswith('#') or l.strip() == '')\n lines = filter(_usable, lines)\n\n def _build_config(key, value, d):\n \"\"\" Called recursively to split up keys \"\"\"\n pieces = key.split('.', 1)\n if len(pieces) == 1:\n d[pieces[0]] = value.strip()\n else:\n d[pieces[0]] = _build_config(pieces[1], value, {})\n\n return d\n\n d = {}\n for line in lines:\n if '=' not in line:\n continue\n\n key, value = line.split('=')\n d = _build_config(key, value, d)\n\n return d", "def load_config():\n config = configparser.ConfigParser()\n config.read('config.ini')\n return config", "def parse_config(path):\n configuration = load_config(path)\n validate_config(configuration)\n 
return configuration", "def load(self):\n config_dict = {}\n with open(\n os.path.join(\n os.path.dirname(\n os.path.abspath(\n inspect.stack()[0][1]\n )\n ),\n \"config.txt\"), 'r') as config_file:\n for line in config_file:\n if not line.startswith('#'):\n line = line.strip().split('=', 1)\n if len(line) == 2:\n config_dict[line[0]] = line[1]\n return config_dict", "def readConfig():\n hosts = []\n domains = []\n with open(\"./host.conf\", \"r\") as fd:\n for line in fd.readlines():\n line = line.strip().split()\n if line != []:\n # Parse config for zone files and hosts\n if line[0] == \"ZONE_FILE:\":\n zoneFile = line[1]\n if line[0] == \"REVERSE_ZONE_FILE:\":\n reverseZoneFile = line[1]\n if line[0] == \"HOST:\":\n hosts.append((line[1], line[2], line[3]))\n if line[0] == \"DOMAIN:\":\n domains.append((line[1], line[2], line[3]))\n\n return zoneFile, reverseZoneFile, hosts, domains", "def parse_config():\n config_file = glob.glob('config.ini')\n parser = ConfigParser()\n if config_file:\n parser.read(config_file)\n else:\n cwd = os.path.abspath(os.path.dirname(__file__))\n config_file = os.path.join(cwd, 'default_config.ini')\n parser.read(config_file)\n return _parse_config(parser)", "def get_config(config_file):\n assert os.path.isfile(config_file), \"Config file %s does not exist!\" \\\n % os.path.abspath(config_file)\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n return config", "def get_config(config_file):\n assert os.path.isfile(config_file), \"Config file %s does not exist!\" \\\n % os.path.abspath(config_file)\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n return config", "def read_settings():\n settings_path = join(dirname(dirname(__file__)), '.settings')\n filename = settings_path\n settings = configparser.ConfigParser()\n settings.read(filename)\n return settings", "def load_config():\n config = ConfigParser()\n config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))\n return config", "def read(self):\n\n if self.cfg:\n return self.cfg\n\n with open(self.config_path) as buf:\n s = buf.read()\n\n self.cfg = self.schema.loads(s)\n\n return self", "def read_config(self,confile):\n\n\n print(\"reading:\",confile)\n with open(confile) as parf:\n data=yaml.load(parf)\n\n\n return data", "def get_config(config_file):\n assert os.path.isfile(config_file), \"Config file %s does not exist!\" \\\n % os.path.abspath(config_file)\n\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n\n return config", "def get_config_file_content(self):\n\n config_content: List[str] = [\n 'server {',\n\t ' listen {};'.format(self.port),\n '',\n ' ##',\n ' # PHP-FPM',\n ' ##',\n ' #location ~ \\.php$ {',\n \t ' #include /etc/nginx/fastcgi_params;',\n\t\t ' #root /var/www/src;',\n ' #fastcgi_split_path_info ^(.+?\\.php)(/.*)$;',\n ' #fastcgi_pass\tphpfpm:3002;',\n\t\t ' #fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;',\n ' #}',\n '',\n ' location / {',\n\t\t ' root /var/www/src;',\n ' index index.html;'\n\t\t ' #index index.php;',\n\t\t ' #rewrite ^ /index.php?$args last; break;',\n\t ' }',\n '}'\n ]\n return config_content", "def get_full_config(self):\n return self._read_config()", "def read_config_file(filename):\n\n # Read the config file\n toml_data = open(filename).read()\n\n # Load the definitions in the config file\n data = toml.loads(toml_data)\n\n return data", "def get_config():\n\n _, res = DBX.files_download(c.io.FILE_CONFIG)\n return yaml.load(io.BytesIO(res.content), Loader=yaml.SafeLoader)", "def 
load(self, file, config={}):\n if not os.path.exists(file):\n err = 'ERROR: config file at \"{f}\" does not exist'\n err = err.format(f=file)\n raise SettingsError(err)\n config = config.copy()\n cp = GoulashConfigParser()\n cp.read(file)\n return cp._sections", "def get_config(self):\n if self.config is None:\n self.config = Configuration()\n\n #Hard coded the file for now, will change with Django interface\n self.config.parse_file('config')", "def get_settings():\n with open('config/config.json') as data_file:\n settings = json.load(data_file)\n return settings", "def get_config(_config_file):\n ''' script absolute location '''\n abs_path = os.path.dirname(inspect.getfile(inspect.currentframe()))\n\n if _config_file[0] not in ('/', '~'):\n if os.path.isfile(os.path.join(abs_path, _config_file)):\n config_path = os.path.join(abs_path, _config_file)\n else:\n raise IOError('Failed to find config file')\n else:\n if os.path.isfile(_config_file):\n config_path = _config_file\n else:\n raise IOError('Failed to find config file')\n\n with open(config_path) as cjson:\n config_data = json.load(cjson)\n # config must not be empty:\n if len(config_data) > 0:\n return config_data\n else:\n raise Exception('Failed to load config file')", "def read_config(self):\n return json.load(open(self.config_dir + \"/\" + self.graph_type.lower() + \"_config.json\"))", "def _read_project(self, filename):\n parser = configparser.ConfigParser()\n parser.read(filename, \"utf8\")\n return parser", "def read_file():\n require_login = \"Log into KAMONOHASHI first to use 'account login' command.\"\n\n if os.path.exists(config_file_path):\n logging.info('open config file %s', config_file_path)\n with open(config_file_path) as f:\n logging.info('begin io %s', config_file_path)\n config_file = json.load(f)\n logging.info('end io %s', config_file_path)\n if not {'server', 'token'} <= set(config_file.keys()):\n raise Exception('Invalid configuration file {config_file_path}. {require_login}'\n .format(config_file_path=config_file_path, require_login=require_login))\n return config_file\n\n raise Exception('No configuration file {config_file_path}. 
{require_login}'\n .format(config_file_path=config_file_path, require_login=require_login))", "def read_config(filename):\n parser = configparser.ConfigParser()\n parser.read(filename)\n logging.info(f'Read config file \\'{filename}\\'.')\n return parser", "def _get_config_from_file(self, filename):\n\n with open(filename, 'r') as f:\n config = load(f)\n return config", "def readconfig(self):\n files = [path.join(self.ccddpath, 'do_not_touch', 'LastSettings.ini'),\n path.join(self.ccddpath, 'config', 'Config.ini'),\n self.outputConfig]\n last = sorted(files, reverse=True,\n key=lambda f: path.getmtime(f) if path.isfile(f) else 0)\n \n log.debug(\"Reading config settings from %s\", last[0])\n try:\n with open(last[0]) as f:\n return f.read()\n except FileNotFoundError:\n return None", "def read_config():\n parser = OptionParser()\n parser.add_option(\"-c\", \"--config\", dest=\"conf_path\", type=\"string\", help=\"config file path\")\n (options, args) = parser.parse_args()\n\n config.readfp(open(options.conf_path)) # \"threadbot.cfg\"\n subreddit = config.get(\"threadbot\", \"subreddit\")\n username = config.get(\"threadbot\", \"username\")\n password = config.get(\"threadbot\", \"password\")\n\n return subreddit, username, password", "def read_config(self):\n\n if self.cached_config is not None:\n return self.cached_config\n try:\n with open(\"config.json\") as config:\n try:\n parsed_config = json.loads(config.read())\n self.cached_config = parsed_config\n return self.cached_config\n except json.JSONDecodeError as ex:\n print(\"[Configuration] [ERROR] invalid json in config file, cannot read.\")\n print(\"[Configuration] [ERROR] error information: {info}\".format(info=ex.msg))\n return dict()\n except Exception as ex:\n print(\"[Configuration] [ERROR] an error occurred while reading your config file. please make sure it is \"\n \"accessible by the program and there are no other issues with the file.\")\n print(\"[Configuration] [ERROR] exception info: {exception}\".format(exception=str(ex)))\n return dict()", "def load_config(self) -> Dict[str, Any]:\n # Load all configs\n config: Dict[str, Any] = self.load_from_files(self.args.get(\"config\", []))\n\n return config", "def read_config_file():\n # Read in the config file to get sensative (non-git) email info\n with open('assets/config.yaml', 'r') as f:\n dikt = yaml.safe_load(f)['email_config']\n\n # Allows to access this dict as if it were an object\n # TODO do we need this? 
Is there a better way?\n class ObjectView():\n def __init__(self, d):\n self.__dict__ = d\n return ObjectView(dikt)", "def readConfig(self, cfg='hamsterPrinter.cfg'):\n from configparser import ConfigParser\n parser = ConfigParser()\n parser.read(cfg)\n return parser", "def _get_config_file(self, config_file):\n with config_file:\n myconfig = yaml.load(config_file)\n return myconfig", "def read_config(self, filename):\n heading = None\n with open(filename) as fin: # open the file\n for line in fin:\n line = line.strip() # cut the tail\n if line.startswith('==') and line.endswith('=='): # detect headings\n heading = line[2:-2] # heading\n self._config[heading] = {} # create a dictionary for the heading\n elif line.count(':') == 1 and heading is not None: # detect attribute\n attr, _, value = line.partition(':') # get attribute and their value\n self._config[heading][attr[:-1]] = value[1:] # update into dic\n elif line == \"\": # if line is empty, skip\n continue\n else: # bad line\n messagebox.showerror(\"Error\", \"Bad config file, I can't read it!\")\n return self._config", "def read_config(name):\n import yaml\n fname=get_config_file(name)\n #print(\"reading:\",fname)\n with open(fname) as fobj:\n data=yaml.load(fobj)\n return data", "def get_config_contents() -> str:\n config_file = os.environ.get(\"PYP_CONFIG_PATH\")\n if config_file is None:\n return \"\"\n try:\n with open(config_file, \"r\") as f:\n return f.read()\n except FileNotFoundError as e:\n raise PypError(f\"Config file not found at PYP_CONFIG_PATH={config_file}\") from e", "def _read_config(filename):\n\n c = {}\n with open(filename, \"r\") as f:\n for line in f:\n key, val = line.split(\"=\")\n key = key.strip()\n val = val.strip()\n c[key] = val\n return c", "def get_config(self):\n return ConfigFile.from_file(path.join(self.run_dir, \"os-stdin\"))", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(os.path.join(get_current_directory(), 'citi.config'))\n\treturn cfg", "def get_config(self, fn = 'config.txt'):\n self.config={}\n fn = os.path.join(self.folder,fn)\n if not os.path.exists(fn): return\n txt = open(fn).read()\n if txt[0]=='{':\n # new format: just a dumped dict\n self.config = eval(txt) \n # old format: more readable\n for line in txt:\n item = line.split(':')\n if len(item)>1:\n self.config[item[0].strip()]=item[1].strip()", "def read_config_file() -> typing.MutableMapping[str, typing.Any]:\n return _read_file()", "def get_config(config_file):\n config = ConfigParser()\n found = config.read(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), config_file)\n )\n if not found:\n raise ValueError(\"No config file found!\")\n return config", "def readConfig(configFile):\n # returns list of parameters \n # with key 'name'\n logging.debug(\"Loading config\")\n with open(configFile) as json_file: \n try:\n d = json.load(json_file)\n\n except:\n print (\"failed to parse configuration\")\n else:\n return d\n logging.debug(\"Config Loaded\")", "def load_configuration(config_file):\n filename = config_file\n config = configparser.ConfigParser()\n config.read(filename)\n\n return config", "def load_config(self, config_file = None):\n if config_file:\n return ET.parse(config_file)\n else:\n return ET.parse(self.config_file)", "def read_config_file():\n\tsuccess = config_parser.read([config_file])\n\n\tif not success:\n\t\tprint \"Failed to parse config file '%s'\" % config_file\n\t\treturn False\n\n\treturn True", "def readConfig(file=\"config.ini\"):\n ip_pool = []\n cmd_pool = []\n 
Config=ConfigParser.ConfigParser()\n Config.read(file)\n machines = Config.items(\"MACHINES\")\n commands = Config.items(\"COMMANDS\")\n for ip in machines:\n ip_pool.append(ip[1])\n for cmd in commands:\n cmd_pool.append(cmd[1])\n print cmd[1]\n return ip_pool,cmd_pool", "def _read_config(filename):\n parser = configparser.RawConfigParser()\n if filename and not parser.read(filename):\n sys.stderr.write(\"Unable to open configuration file %s. Use --config='' to disable this warning.\\n\" % filename)\n\n config = {}\n\n for section, defaults in BASE_CONFIG.items():\n # Patterns are handled separately\n if section == 'patterns':\n continue\n\n for name, descr in defaults.items():\n kind, default = descr\n if section in parser.sections() and name in parser.options(section):\n if kind == 'int':\n value = parser.getint(section, name)\n elif kind == 'float':\n value = parser.getfloat(section, name)\n elif kind == 'bool':\n value = parser.getboolean(section, name)\n else:\n value = parser.get(section, name)\n else:\n value = default\n config[name] = value\n\n if 'patterns' in parser.sections():\n patterns = [parser.get('patterns', opt) for opt in parser.options('patterns')]\n else:\n patterns = DEFAULT_PATTERNS\n config['patterns'] = patterns\n\n return config", "def read_config(self):\n try:\n config_dict = None\n if not os.path.exists(self.config_file_path):\n raise Exception(\"Class Config_Manager- read_config(..): \" +\n self.config_file_path + \" does not exist\")\n with open(self.config_file_path, 'r') as stream:\n try:\n config_dict = yaml.load(stream)\n # print(type(config_dict))\n except yaml.YAMLError as exc:\n print(exc)\n return config_dict\n except Exception as error:\n utilities.show_exception_info(error)\n raise error", "def parse_config(config_file):\n\n conf = {}\n config = configparser.ConfigParser()\n valid_schedule = r'\\d{1,2}:\\d{2}(:\\d{2})*\\s+[AM|PM]'\n \n #configparser does not throw exception (empty dataset if files are not found)\n if(len(config.read(config_file)) == 0):\n raise FileNotFoundError(\"Failed to find config file\")\n\n\n conf['credentials'] = {\"username\": config['credentials']['username'], \"password\": config['credentials']['password']}\n conf['hashtags'] = [hashtag for hashtag in config['hashtags'].values()]\n conf['schedule'] = [time.upper() for time in config['schedule'].values() if re.search(valid_schedule,time, re.IGNORECASE)]\n conf['driverpath'] = config['driver']['path']\n\n return conf", "def parseConfigFile(self, config_file_path):\n parser = configparser.SafeConfigParser()\n parser.read(config_file_path)\n self.seuil_snr = int(parser.get('seuils', 'snr'))\n self.seuil_elev_sat = int(parser.get('seuils', 'sat_elevation'))\n\n # nav data path\n self.nav_data_file = parser.get('data', 'nav')\n\n print(self.nav_data_file)\n\n # obs data paths\n self.obs_data_file = parser.get('data', 'obs').split(\",\")\n\n print(self.obs_data_file)", "def parse(self):\n ret_val = {}\n\n with open(self.config_file_path) as config_file:\n config = json.load(config_file)\n\n project_name = config['projectName']\n ret_val['project_name'] = project_name\n\n upstream_urls = config['upstreamURLs']\n ret_val['upstream_urls'] = upstream_urls\n\n base_location = config['baseLocation']\n ret_val['download_locations'] = Parser.parse_directories(\n config['directories'], base_location, project_name)\n\n return ret_val", "def get_config():\n ini_path = os.environ.get(\"SPI_TOOLS_CONFIG_FILE\",\n (Path.home() / \"www/python/config.ini\").as_posix())\n\n # Apparently git 
doesn't preserve file modes, so this fails on\n # unsafe-sample-config.ini\n #\n # ini_mode = os.stat(ini_path).st_mode\n # if ini_mode & 0o77:\n # raise RuntimeError(\"%s has mode %o: access by non-owner disallowed\" %\n # (ini_path, ini_mode))\n config = configparser.ConfigParser()\n config.read(ini_path)\n return config", "def parse_configuration_file(config_file):\n try:\n parser = parser_bnf()\n result = parser.parseFile(config_file, parseAll=True)\n except (ParseException, ParseSyntaxException) as e:\n print(\"ERROR: {m}\".format(m=str(e)))\n sys.exit(1)\n return result", "def parse_config_file(config_file):\n parsed = {}\n\n try:\n with open(config_file, \"r\") as data:\n for line in data.readlines():\n if \"=\" not in line:\n continue\n key, val = line.split(\"=\", 1)\n parsed[key] = val.strip()[1:-1]\n except IOError:\n logging.error(\"%s doesn't exist\" % config_file)\n raise\n\n return parsed", "def get_config(cls):\n path_home = os.path.expanduser('~')\n path_app = os.path.dirname(__file__)\n config = ConfigParser.RawConfigParser()\n paths = [Config.DEFAULT_CONFIGURATION_FILE,\n \"%s/.compta/server.cfg\" % path_home,\n \"%s/../server.cfg\" % path_app\n ]\n get_file = False\n for path in paths:\n if os.path.exists(path):\n try:\n config.read(path)\n get_file = True\n except ConfigPArser.ParsingError as error:\n print error\n sys.exit(1)\n break\n if not get_file:\n print \"No config files found\"\n sys.exit(1)\n\n dict_config = {}\n try:\n dict_config[\"database_path\"] = config.get(\"Database\", \"path\")\n dict_config[\"database_name\"] = config.get(\"Database\", \"name\")\n except ConfigParser.NoSectionError as error:\n print error\n sys.exit(1)\n except ConfigParser.NoOptionError as error:\n print error\n sys.exit(1)\n return dict_config", "def get_config(_config_file='config.json'):\n try:\n ''' script absolute location '''\n abs_path = os.path.dirname(inspect.getfile(inspect.currentframe()))\n\n if _config_file[0] not in ('/', '~'):\n if os.path.isfile(os.path.join(abs_path, _config_file)):\n config_path = os.path.join(abs_path, _config_file)\n else:\n raise IOError('Failed to find config file')\n else:\n if os.path.isfile(_config_file):\n config_path = _config_file\n else:\n raise IOError('Failed to find config file')\n\n with open(config_path) as cjson:\n config_data = json.load(cjson)\n # config must not be empty:\n if len(config_data) > 0:\n return config_data\n else:\n raise Exception('Failed to load config file')\n\n except Exception as _e:\n print(_e)\n raise Exception('Failed to read in the config file')", "def load_config(configfile=\"../data/test.cfg\"):\n\n config = configparser.ConfigParser()\n config.read([configfile])\n return config", "def get_config(self, config_path):\n # Cut-down version of the function in cookiecutter.config\n\n if not os.path.exists(config_path):\n self.fail(\n f\"Config file {config_path} does not exist.\"\n )\n\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = poyo.parse_string(file_handle.read())\n except poyo.exceptions.PoyoException as e:\n self.fail(\n f\"Unable to parse config file {config_path}: {e}\"\n )\n\n return yaml_dict", "def load_config(f):\n config = ConfigParser.RawConfigParser()\n config.readfp(f)\n # Mininum required data. 
Raises exception if non-existent.\n config.get('memrise', 'username')\n config.get('beeminder', 'username')\n config.get('beeminder', 'auth_token')\n config.get('beeminder', 'goal_slug')\n return config", "def parse_file(self, fpath):\n sdir = os.path.abspath(os.path.join(os.path.dirname(salt.__file__), os.pardir))\n with open(os.path.join(sdir, fpath), \"rb\") as f:\n return f.readlines()", "def get_config(config_file):\n config = yaml.load(open(path.join(path.dirname(path.abspath(__file__)),\n config_file)))\n return config", "def read_options(self,options_file):\n config=ConfigParser.ConfigParser()\n config.read(options_file)\n return config", "def _load_configuration(cls, config_file=None):\n config = SafeConfigParser()\n # add the defaults first\n for section, settings in CmdContext.DefaultValues.items():\n config.add_section(section)\n for option, value in settings.items():\n config.set(section, option, value)\n # read the config files\n\n config_files = []\n if config_file:\n config_files.append(config_file)\n else:\n config_files.extend(CmdContext.DefaultConfigFiles)\n\n for config_file in config_files:\n if os.access(config_file, os.F_OK | os.R_OK):\n config.read(config_file)\n return config\n\n return config", "def parse():\n rcParams = configparser.ConfigParser(defaults=defaults())\n rcParams.read([os.path.join(os.getcwd(), 'watershed_workflowrc'),\n os.path.join(os.getcwd(), '.watershed_workflowrc'),\n os.path.join(home(), '.watershed_workflowrc')])\n return rcParams", "def parse_config_file(fpath):\n if not os.path.isfile(fpath):\n raise RuntimeError('ERROR: Unable to find config file at path: {}'\n .format(fpath))\n\n with open(fpath, 'r') as f:\n return yaml.safe_load(f)", "def readConfig(filename='config.json'):\n with open(filename, 'r') as f:\n return json.load(f)", "def readConfig(filepath=None):\n result = None\n if filepath is None:\n filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"res/\", \"config.ini\")\n if os.path.exists(filepath):\n config = ConfigParser.ConfigParser()\n config.read(filepath)\n result = config\n return result", "def get_config():\n dir_path = os.path.dirname(os.path.realpath(__file__))\n with open(\"{0}/config.yaml\".format(dir_path), 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\n return cfg" ]
[ "0.77496666", "0.7607564", "0.75940424", "0.75590175", "0.7350562", "0.7329519", "0.7322378", "0.73064345", "0.7271919", "0.72459453", "0.71848226", "0.7173341", "0.71679926", "0.7166633", "0.71516", "0.7046862", "0.7030489", "0.70298284", "0.6951747", "0.693856", "0.6933751", "0.6929327", "0.69200516", "0.6907919", "0.69077086", "0.69007176", "0.68702143", "0.6862842", "0.685601", "0.6842408", "0.680738", "0.6801018", "0.68003356", "0.6797445", "0.67942375", "0.6788342", "0.67797583", "0.6773913", "0.6773913", "0.6769937", "0.6766008", "0.6765501", "0.6761683", "0.6752277", "0.6752133", "0.6748014", "0.67149734", "0.6703987", "0.67018336", "0.67010826", "0.6685184", "0.6675677", "0.66740036", "0.6673119", "0.6672711", "0.66723084", "0.666121", "0.6659292", "0.6656552", "0.6655666", "0.66288203", "0.6619307", "0.66077924", "0.66009015", "0.6598895", "0.6597619", "0.65956384", "0.6593544", "0.65921015", "0.6589805", "0.658632", "0.6577281", "0.65764296", "0.65745115", "0.6549523", "0.65475893", "0.6543077", "0.65170586", "0.6513356", "0.65117836", "0.6506364", "0.6492036", "0.648936", "0.64843476", "0.64822525", "0.64812934", "0.6478919", "0.6467233", "0.64642143", "0.6459574", "0.64522654", "0.643672", "0.64279044", "0.6425619", "0.64236605", "0.6415771", "0.6414916", "0.6414884", "0.6413357", "0.6412651" ]
0.76075804
1
Return the Path of the cache file for the key.
def cache_file(cache_key): return MASTOOLS_DIR / f"{cache_key}_cache.json"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cache_file_path(self) -> str:\n return self.cache_file_path", "def file_path(self, key=None):\n if key is not None:\n return os.path.join(self.directory, self.file_name(key))\n return getattr(\n self.agent,\n constants.CONST_LOCK_FILE,\n os.path.join(self.directory, self.file_name()),\n )", "def get_cache_path(self):", "def get_cache_path(self):", "def cache_path(self):", "def cache_path(self):", "def cache_key(self):\n\n return \"{}.json\".format(self.path)", "def _get_cache_file_path(self, identifier):\n cache_file_name = get_cache_file_name(identifier)\n cache_file_path = os.path.join(self.cache_path, cache_file_name)\n\n return cache_file_path", "def cachepath(self, *args, **kw):\n cachename = self.cachefunc(*args, **kw)\n ret = os.path.join(self.cachedir, cachename)+'.'+self.serializer\n return ret", "def key_path(self):\n keypath = self._get_field('System', 'keypath')\n localpath = \"/\".join(__file__.split('/')[:-1])\n return join(localpath, keypath)", "def get_cache_file_path(self, URL):\n\n filename = hashlib.md5(URL.encode('utf-8')).hexdigest() + '.wbc'\n path = pathlib.Path(config.WEATHER_PROVIDERS['App']['Cache_path'])\n cache_file_path = path.joinpath(filename)\n\n return cache_file_path", "def get_cache_file_path(self):\n home_path = os.path.expanduser(\"~\")\n # path to the programs cache directory\n full_cache_dir = os.path.join(home_path, \".cache\", CACHE_DIR)\n\n if not os.path.exists( full_cache_dir ):\n os.makedirs( full_cache_dir )\n \n return os.path.join( full_cache_dir, FILE_NAME )", "def get_preference_file_cache_destination_path():\n\n return read_preference_key(search_key=\"cache_manager_cache_path\")", "def cache_key(self):\r\n statinfo = os.stat(self.pathname)\r\n return (self.filename + str(statinfo.st_mtime)).encode('ascii', 'ignore')", "def get_cached_path(self):\n if util.IS_CACHE_ENABLED and not self.physical_key.is_local():\n return ObjectPathCache.get(str(self.physical_key))\n return None", "def _get_cached_filepath(prefix, url):\n filename = '{prefix}_{hash_string}.cache'.format(\n prefix=prefix,\n hash_string=_hash_value(url),\n )\n logger.debug('Cached filepath: ' + os.path.join(CACHE_DIRECTORY, filename))\n return os.path.join(CACHE_DIRECTORY, filename)", "def _get_cache_filename(self):\n home_dir = os.path.expanduser(\"~\")\n filename = 'dbcollection.json'\n return os.path.join(home_dir, filename)", "def _cachefilename(self, cachedir):\n\n return cachedir / \"filename\"", "def _get_buckets_cache_filename():\n\n cache_dir = _get_cache_dir()\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n return os.path.join(cache_dir, \"buckets_files.cache\")", "def cache_path(self, vpath):\n return os.path.join(self.cache_root, \n *vpath.split('/') )", "def cache_path(self):\n cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')\n if not os.path.exists(cache_path):\n os.mkdir(cache_path)\n return cache_path", "def _GetCachedFileByPath(self, safe_key_path):\n longest_key_path_prefix = u''\n longest_key_path_prefix_length = len(longest_key_path_prefix)\n for key_path_prefix in self._registry_files.iterkeys():\n if safe_key_path.startswith(key_path_prefix):\n key_path_prefix_length = len(key_path_prefix)\n if key_path_prefix_length > longest_key_path_prefix_length:\n longest_key_path_prefix = key_path_prefix\n longest_key_path_prefix_length = key_path_prefix_length\n\n if not longest_key_path_prefix:\n return None, None\n\n registry_file = self._registry_files.get(longest_key_path_prefix, None)\n return 
longest_key_path_prefix, registry_file", "def get_cache_file(self, dependencies):\n filename = '%s.tar' % self.get_cache_key(dependencies)\n return os.path.join(self.cache_directory, filename)", "def cache_file(self, repo):\n token = blake2b(repo.location.encode()).hexdigest()[:10]\n dirname = f\"{repo.repo_id.lstrip(os.sep)}-{token}\"\n return pjoin(self.options.cache_dir, \"repos\", dirname, self.cache.file)", "def get_path(self, key):\n return get_path(self, key)", "def key_file(self):\n return self._get('key_file')", "def _get_path_to_key_file():\n\n if 'private_key_path' not in ctx.node.properties:\n raise NonRecoverableError(\n 'Unable to get key file path, private_key_path not set.')\n\n return os.path.expanduser(ctx.node.properties['private_key_path'])", "def _filename(self, key):\n return os.path.join(self.root, key[:2], key)", "def _get_cached_file_name(bucket_name, saltenv, path):\n\n file_path = os.path.join(_get_cache_dir(), saltenv, bucket_name, path)\n\n # make sure bucket and saltenv directories exist\n if not os.path.exists(os.path.dirname(file_path)):\n os.makedirs(os.path.dirname(file_path))\n\n return file_path", "def cache_path(self):\n benchmark_name, image_set_name, _ = self.name.rsplit(\"_\", 2)\n cache_path = os.path.join(self._cache_path,'{}_{}_cache'.format(benchmark_name, image_set_name))\n if not os.path.exists(cache_path):\n os.mkdir(cache_path)\n return cache_path", "def cache_key(cls) -> str:\n return cls._cache_key", "def get_cachefile(filename):\n if not os.path.exists(cachedir):\n os.makedirs(cachedir)\n return os.path.join(cachedir, filename)", "def get_cache_filename(self):\n filename = _slugify(self.parent_filename.replace('.py', ''))\n funcname = _slugify(self.__name__)\n folder = os.path.curdir if USE_CURRENT_DIR else os.path.dirname(self.parent_filepath)\n return os.path.join(folder, filename + '_' + funcname + '.cache')", "def cachedir(self):\n\n return self._cachedir", "def _get_key_path(self, key):\n if not isinstance(key, str):\n raise TypeError(\"pickle keys must be strings\")\n path = abspath(join(self._path, key + \".pkl\"))\n if not path.startswith(abspath(self._path)):\n raise OSError(joins(\"invalid path to pickle file:\", path))\n return path", "def cache_key(self):", "def _2to3_cache_path(self, path):\n head, tail = os.path.split(path)\n base_filename, sep, tail = tail.partition('.')\n filename = ''.join([base_filename, sep, self.tag, sep, tail])\n return os.path.join(head, '__pycache__', filename)", "def path(self) -> str:\n return os.path.join(DIR_CACHE_TABLES, f\"{self.name}.parquet\")", "def get_cache_path(app_path, er_config, entity_type):\n string = json.dumps(er_config, sort_keys=True)\n hashid = Hasher(algorithm=\"sha1\").hash(string=string)\n hashid = f\"{hashid}$synonym_{entity_type}\"\n\n return path.get_entity_resolver_cache_file_path(app_path, hashid)", "def __cache_path(song_name, artist):\n song_name = REGEX_FILEPATH_GUARD.sub(\"-\", song_name)\n artist = REGEX_FILEPATH_GUARD.sub(\"_\", artist)\n cache_name = \"-\".join([artist, song_name]) + \".json\"\n cache_path = op.join(op.dirname(__file__), \"data\", \"cache\", cache_name)\n\n return cache_path", "def cachepath(self):\n return [self.fs.cachepath(uri) for uri in self.uri]", "def path(self) -> str:\n return os.path.join(DIR_CACHE_DATASETS, f\"{self.name}.parquet\")", "def _get_instrument_cache_file_path(instrument_name, start_date, end_date, cache_dir):\n\n identifier = f'{instrument_name}_{start_date}_{end_date}'\n return os.path.join(cache_dir, 
f'{identifier}.data')", "def get_processed_path(self):\n location = self.get_storage().location\n return self.get_processed_key_name()[len(location):]", "def archivepath(self, cachefname):\n modtime = os.stat(cachefname).st_mtime\n ext = '.'+self.serializer\n base = cachefname.rsplit(ext, 1)[0]\n ret = '%s-%f%s' % (base, modtime, ext)\n return ret", "def _get_cache_key(self, event):\n if (isinstance(event, FileMovedEvent) and\n self._filter(event.dest_path)):\n path = event.dest_path\n else:\n path = event.src_path\n return path", "def cache_directory(self) -> str:\n # TODO: Find better solution than local import?\n from settings import CACHE_DIR\n return os.path.join(\n CACHE_DIR,\n self.name.lower())", "def _get_cache_key(self):\n\n return '__CACHED__{method}__'.format(\n method=function_utils.get_fully_qualified_name(self.fget).upper())", "def _get_cache_dir(self):\n return self.manager.cache_dir", "def cache_key_name(cls):\r\n return 'configuration/{}/current'.format(cls.__name__)", "def cache_path(cls):\n system_cache_path = SystemCachePath()\n\n if sys.platform.startswith('win'):\n cache_directory = system_cache_path.cache_path_win()\n return os.path.join(cache_directory, 'Espressif', 'ComponentManager', 'Cache')\n else:\n if sys.platform == 'darwin':\n cache_directory = system_cache_path.cache_path_macos()\n else:\n cache_directory = system_cache_path.cache_path_unix()\n\n return os.path.join(cache_directory, 'Espressif', 'ComponentManager')", "def _get_cache_dir(self):\n return self.data['info']['root_cache_dir']", "def get_cache_file(cls, root_dir, constants):\n return os.path.join(root_dir, '%s__%s.hdf5' % (constants['mos_type'], cls.__name__))", "def get_cache_file(cls, root_dir, constants):\n return os.path.join(root_dir, '%s__%s.hdf5' % (constants['mos_type'], cls.__name__))", "def getCacheFile(ns, digest):\n return os.path.join(getDir(cacheDir, ns), digest)", "def _get_cache_filename(name, filename):\n filename = os.path.abspath(filename)[1:]\n home_folder = os.path.expanduser('~')\n base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')\n\n return os.path.join(base_cache_dir, name, filename)", "def get_key_data_filepath():\n global key_filepath, directory\n filename = 'key.csv'\n key_filepath = os.path.join(directory, filename)", "def _get_key_path(self, key_name, serial):\n return '%s%s/%d_%s.key' % (self.ca_dir, PRIVATE_DIR_NAME, serial,\n key_name)", "def private_key_path(self):\n if self._private_key_path is not None:\n return self._private_key_path\n\n location = self.settings.Location\n if location.AttachmentName:\n self._private_key_path = 'kdbx-attachment:///{}/{}'.format(\n self.entry.path, location.AttachmentName.text)\n return self._private_key_path\n else:\n self._private_key_path = location.FileName.text\n return self._private_key_path", "def cache(self):\n return f'var/cache/{self.environment}'", "def _keypath(self) -> pathlib.Path:\n home = pathlib.Path.home()\n keyfile = home / \".cmdc\" / \"apikey\"\n keyfile.parent.mkdir(parents=True, exist_ok=True)\n return keyfile", "def get_cache_key(self):\n return get_cache_key(\n self.__class__.__name__, settings=(self.pk, ))", "def cache_key(self):\n return self.__class__.create_cache_key(self.key, **self.get_kwargs())", "def _abs_path(self, path):\n\n debug(\"current cache: %s\", self._cache)\n\n #save path in argument\n arg_path = path\n try:\n #try to return whats in cache:\n debug(\"trying to access %s path in cache\", arg_path)\n return self._cache[arg_path]\n except KeyError:\n debug(\"%s not found in 
cache\", arg_path)\n #normalize path:\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n path = os.path.normpath(path)\n #save the result in the cache:\n self._cache[arg_path] = path\n debug(\"stored %s in cache\", self._cache[arg_path])\n return path", "def getpath(self, key, default=''):\n path = self.get(key, default)\n if not path:\n return default\n return self._normalize_path(path, self.env)", "def _cache_filename(self, filename=None):\n if not self.persistent:\n return None\n\n if filename is not None:\n return filename\n\n if self.persistent is not True:\n return self.persistent\n\n return self._default_persistent_cache", "def cache_key_part(self) -> str:\n return self.name", "def getAsAbsolutePath(self, key, default=None):\n\n filename = self.get(key, default)\n if filename is None:\n raise KeyError('Config key [%s] not found' % (key,))\n\n return self._getAbsolutePath(filename)", "def cache_key(self):\n return self.value", "def get_local_cache_folder(self):\n\n return self._cache_folder", "def cache_name(name, typ=\"pkl\"):\n return os.path.join(CACHE_DIR, name + '.' + typ)", "def use_cached_files(self, cache_key):\r\n pass", "def _locate_from_cache_file():\n path_file = os.path.join(_get_temp_dir(), _config.pathfile)\n return _read_file(path_file) if os.path.isfile(path_file) else None", "def get_cache_filename(typ, dumpname, address=None):\n fname = typ\n if address is not None:\n fname = '%x.%s' % (address, typ)\n return os.path.sep.join([get_cache_folder_name(dumpname), fname])", "def get_resource_cache_path(self, resource_id, create=False):\n path = os.path.abspath(os.path.join(self.cache_dir, self.server_domain, resource_id))\n if create:\n with contextlib.suppress(OSError):\n os.makedirs(path)\n return path", "def path_apparmor_cache(self) -> Path:\n return self.path_supervisor / APPARMOR_CACHE", "def rdap_info_cache_directory() -> str:\n current_path = Path(__file__).resolve().parent\n return os.path.join(current_path, 'cache', 'rdap')", "def get_cache(self, key):\n return self.r.get(key)", "def path(self):\n return self.storage.path(self.name)", "def cached_shapefile_path(fpath):\n\n p, ext = os.path.splitext(fpath)\n\n if ext.lower() == '.p':\n # No need to recache pickled files (this is for nested calls)\n return fpath\n\n if ext.lower() != '.shp':\n raise ValueError('File extension not recognised: {}'.format(ext))\n\n # Cached directory and file\n cp = os.path.commonprefix([cache_dir, p])\n cp = os.path.join(cache_dir, hash_cache_dir + '_shp',\n os.path.relpath(p, cp))\n ct = '{:d}'.format(int(round(os.path.getmtime(fpath)*1000.)))\n of = os.path.join(cp, ct + '.p')\n if os.path.exists(cp):\n # We have to check if the file changed\n if os.path.exists(of):\n return of\n else:\n # the file has changed\n shutil.rmtree(cp)\n\n os.makedirs(cp)\n return of", "def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} 
not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))", "def filename_for_key(self, key, extension=None):\n if extension is None:\n extension = self.file_extension\n f = self.key2basename(key) + extension\n return os.path.join(self.basepath, f)", "def GetKeyByPath(self, key_path):", "def _get_cache_dir():\n\n # Or is that making too many assumptions?\n return os.path.join(__opts__[\"cachedir\"], \"s3cache\")", "def _get_key_url(self, key):\n urls = self.get_URLS(key)\n\n if len(urls) == 1:\n return urls[0]\n else: # multiple\n # TODO: utilize cache to check which archives might already be\n # present in the cache.\n # Then if not present in the cache -- check which are present\n # locally and choose that one to use\n if self._last_url and self._last_url in urls:\n return self._last_url\n else:\n return urls[0] # just the first one", "def file_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"file_key\")", "def file_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"file_key\")", "def asset_path(bundle_key: str) -> str:\n asset_base_path = current_app.config.get('ASSET_BASE_PATH', '')\n asset_file = current_app.config.get('assets', {}).get(bundle_key)\n if not asset_file:\n raise LookupError(f\"Missing asset file for {bundle_key}.\")\n return os.path.join(asset_base_path, asset_file)", "def get(key):\n return Cache.cache_connector.get(key)", "def get(self, key):\n # type: (str) -> str\n return self.__cache_get(key)", "def generate_cache_key(req, method: str = None) -> str:\n\n path = req.path\n if path.endswith('/'):\n path = path[:-1]\n\n if not method:\n method = req.method\n\n return f'{path}:{method.upper()}'", "def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in (\"http\", \"https\", \"s3\"):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))", "def get_suitable_cache_subpath(cls, py_file):\n path = os.path.abspath(py_file)\n subpath = os.path.dirname(path)\n parentdir = os.path.split(subpath)[-1]\n # Use SHA1 to reduce path length.\n # Note: windows doesn't like long path.\n hashed = hashlib.sha1(subpath.encode()).hexdigest()\n # Retain parent directory name for easier debugging\n return '_'.join([parentdir, hashed])", "def ivy_cache_dir(self):\r\n return self._ivy_cache_dir", "def get_upload_path(self):\n location = self.get_storage().location\n return self.cleaned_data['key_name'][len(location):]", "def _cache_key_to_dir(cachedir, func, argument_hash):\r\n parts = [cachedir]\r\n if isinstance(func, _basestring):\r\n parts.append(func)\r\n else:\r\n parts.append(_get_func_fullname(func))\r\n\r\n if argument_hash is not None:\r\n parts.append(argument_hash)\r\n return os.path.join(*parts)", "def file_key(filename):\n return 
FILE_PREFIX + filename", "def _get_akey_afile(self, key):\n url = self._get_key_url(key)\n return self._parse_url(url)[:2] # skip size", "def path(self):\n return self.file_path()", "def get_model_cache_file(problem_name, model_id, base_data_dir=None):\n raw_problem_data_dir, _, model_cache_dir = _get_base_dirs(base_data_dir)\n\n return \"{model_cache_dir}/{problem_name}/{model_id}/{model_file_name}\" \\\n .format(model_cache_dir=model_cache_dir,\n problem_name=problem_name,\n model_id=model_id,\n model_file_name=_model_file_name)" ]
[ "0.81592643", "0.7996879", "0.7945717", "0.7945717", "0.7943165", "0.7943165", "0.7821009", "0.7612195", "0.7606929", "0.74966335", "0.74500173", "0.7417488", "0.73771125", "0.7359973", "0.73446333", "0.7326632", "0.7269066", "0.7237881", "0.7232423", "0.71653837", "0.7153147", "0.7145611", "0.714445", "0.7143124", "0.7094347", "0.70512605", "0.70050144", "0.697231", "0.69423157", "0.69187564", "0.6870893", "0.68536884", "0.68459386", "0.68075734", "0.67936623", "0.6785776", "0.67460734", "0.6730252", "0.6711956", "0.6708109", "0.67079234", "0.6671782", "0.66709626", "0.6667625", "0.6667145", "0.666314", "0.66566795", "0.6640053", "0.6633188", "0.662633", "0.6613856", "0.6585844", "0.6584718", "0.6584718", "0.6569201", "0.6531428", "0.6525374", "0.649434", "0.6480189", "0.6475311", "0.64649427", "0.63819796", "0.6357632", "0.6350124", "0.6327665", "0.6319121", "0.62859195", "0.62770194", "0.62739486", "0.62685424", "0.6242507", "0.6238869", "0.62332505", "0.6201673", "0.6197426", "0.6193778", "0.6183343", "0.6176085", "0.61740977", "0.61733216", "0.61555845", "0.6152343", "0.61468375", "0.61377156", "0.6125666", "0.61248076", "0.61248076", "0.61194015", "0.6103522", "0.61013585", "0.60976666", "0.6095194", "0.60946995", "0.60870016", "0.6084267", "0.60806865", "0.6078438", "0.60571724", "0.60513246", "0.6047268" ]
0.7776
7
Return the contents of the cache for the key, if its version is correct.
def load_cache(cache_key, version):
    # Try to get the results of the last run, but fall back to an empty dict if that's not
    # available. That's most likely to happen on the first run.
    try:
        cache = json.loads(cache_file(cache_key).read_text())
    except FileNotFoundError:
        return {}

    if cache["version"] != version:
        raise ValueError(
            f"Unknown {cache_key} version number: expected {version}, got {cache['version']}"
        )

    return cache[cache_key]
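A minimal usage sketch for load_cache above, assuming a cache_file helper along the lines of the MASTOOLS_DIR variant that appears among the negatives further down; the directory location and the "users" key are assumptions for illustration only.

from pathlib import Path
import json  # needed by load_cache/save_cache above

MASTOOLS_DIR = Path.home() / ".mastools"          # assumed location, illustration only
MASTOOLS_DIR.mkdir(parents=True, exist_ok=True)

def cache_file(cache_key):
    # One JSON file per cache key, mirroring the helper shown in the negatives below.
    return MASTOOLS_DIR / f"{cache_key}_cache.json"

# First run: the file does not exist yet, so load_cache falls back to an empty dict.
assert load_cache("users", version=1) == {}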
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cache(self, key):\n return self.r.get(key)", "def get(self, key):\n return self.cache_data.get(key)", "def get(self, key):\n # Initialize key variables\n result = self.cache.get(key)\n\n # Return\n return result", "def cache_get(self, key: str) -> Optional[bytes]:\n if self.cache is not None:\n return self.cache.get(key)\n return None", "def get(key):\n return Cache.cache_connector.get(key)", "def get(self, key):\n if key:\n return self.cache_data.get(key)\n else:\n return None", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def getCache(self, key):\n return self._cache.get(key, None)", "def get(self, key):\n if key and key in self.cache_data:\n return self.cache_data[key]\n return None", "def get(self, key):\n if key is None:\n return None\n return self.cache_data.get(key, None)", "def get(self, key):\n # type: (str) -> str\n return self.__cache_get(key)", "def get(self, key):\n return self._cache[key]", "def get(self, key):\n raise NotImplementedError(\"get must be implemented in your cache class\")", "def __getitem__(self,key):\n result = None\n # check if it's tin the cache first\n if key in self._cache:\n result = self._cache[key]\n else:\n # it's not in the cache so retrieve it\n result = self._get_from_tree(key)\n # remove None values\n result = [x for x in result if x is not None]\n self._cache[key] = result\n\n return result", "def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def get(self, key):\n if key is None or key not in self.cache_data:\n return None\n else:\n for x in self.LRU:\n if x.key == key:\n x.age = 0\n else:\n x.age += 1\n return self.cache_data[key]", "def _query_cache(self, key):\n if self._cache:\n cache_key = self._make_cache_key(key)\n try:\n val = self._cache[cache_key]\n self._log.debug('cache hit for key {cache_key} ({key}) '.format(\n cache_key=cache_key, key=key))\n return val\n except KeyError:\n self._log.debug('cache miss for key {cache_key} ({key}) '.format(\n cache_key=cache_key, key=key))\n return None\n else:\n self._log.debug('cache disabled (self._cache is None)')\n return None", "def get(self, key):\n if key is None:\n raise TypeError\n\n index = self.__get_cache_set_index(key)\n cache_set = self.cache_sets[index]\n h_key = self.__ensure_hashable_key(key)\n return cache_set.get(h_key)", "def __getitem__(self, key):\n self._remove_expired()\n\n cache_entry = self._d.get(key, None)\n log.debug(\"__getitem__: {}\".format(cache_entry))\n\n return cache_entry", "def get_output_from_cache(name, filename):\n cache_filename = _get_cache_filename(name, filename)\n if (os.path.exists(cache_filename) and\n os.path.getmtime(filename) < os.path.getmtime(cache_filename)):\n with io.open(cache_filename) as f:\n return f.read()\n\n return None", "def 
get(self, path):\n\t\treturn self.cache.get(path)", "def get(self, key):\n if key in self.cache:\n value = self.cache[key].value\n # Re-enqueue to indicate recently used\n self._re_enqueue(self.cache[key])\n return value\n else:\n return -1", "def _read_cache(url):\n\n j = None\n m = hashlib.md5()\n m.update(url)\n if os.path.exists('.cache.%s' % m.hexdigest()):\n with open('.cache.%s' % m.hexdigest(), 'rb') as infile:\n j = json.load(infile)\n\n return j", "def get(self, key, silent=False):\n result = self.get_cache(key, silent=silent)\n if result is not None:\n return result\n\n result = self.get_store(key, silent=silent)\n if result is not None:\n return result\n\n # As a last ditch effort, let's hope we have a key\n # in local cache that's possibly stale\n return self.get_local_cache(key, force_grace=True)", "def get(self, key):\n if key in self.cache:\n return self.cache[key]\n valueat,valuelen = self.keys[key]\n valuedump = self.file.readp(valueat, valuelen)\n value = pickle.loads(valuedump)\n self.cache[key] = value\n return value", "def get(self, key):\n if self.dexists('ttl', key) and int(dt.now().strftime('%s')) >= self.dget('ttl', key):\n self.rem(key)\n return None\n return super(MyCache, self).get(key)", "def memcache_full_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"memcache_full_version\")", "def get_from_cache(self, url):\n cache_key, cache_lookup = self.get_cacheable_info(url)\n\n cache_timeout = self.cache_timeouts.get(cache_key,\n self.default_cache_timeout)\n\n data, access_time = MEM_CACHE[cache_key].get(cache_lookup, (None, 0))\n if data and time.time() - access_time < cache_timeout:\n return data\n return False", "def _get_cache(self, course_version_guid):\n if self.request_cache is None:\n return None\n\n return self.request_cache.data.setdefault('course_cache', {}).get(course_version_guid)", "def needs_update(self, cache_key):\r\n return self._read_sha(cache_key) != cache_key.hash", "def _get_cache(self, course_version_guid):\r\n if not hasattr(self.thread_cache, 'course_cache'):\r\n self.thread_cache.course_cache = {}\r\n system = self.thread_cache.course_cache\r\n return system.get(course_version_guid)", "def get(self, key):\n #if any(map(lambda v : v in key.lower(), [ \"go:\", \"mondo:\", \"hp:\" ])):\n # return None\n key = self.prefix + key\n result = None\n if self.enabled:\n if key in self.cache:\n result = self.cache[key]\n elif self.redis:\n rec = self.redis.get (key)\n result = self.serializer.loads (rec) if rec is not None else None\n self.cache[key] = result\n else:\n path = os.path.join (self.cache_path, key)\n if os.path.exists (path):\n with open(path, 'rb') as stream:\n result = self.serializer.loads (stream.read ())\n self.cache[key] = result\n return result", "def get_routes_from_cache(key: str) -> str:\n\n val = client.get(key)\n return val", "def memcacheGet(self, key):\n\n key = base64.b64encode(key)\n try:\n value = self._getMemcacheClient().get(key)\n except MemcacheError:\n log.error(\"Could not read from memcache, retrying\")\n try:\n value = self._getMemcacheClient(refresh=True).get(key)\n except MemcacheError:\n log.error(\"Could not read from memcache again, giving up\")\n del self.memcacheClient\n raise DirectoryMemcacheError(\"Failed to read from memcache\")\n return value", "def test_get_versions_cached(self):\n versions = {\"foo-1.0.tar.gz\": \"../../packages/foo-1.0.tar.gz\"}\n self.index._save_index(\"foo\", versions)\n with patch(\"cheddar.index.remote.get\") as mocked:\n result = 
self.index.get_versions(\"foo\")\n eq_(result, versions)\n eq_(mocked.call_count, 0)", "async def _get(self, key, encoding=\"utf-8\"):\n return SimpleMemoryBackend._cache.get(key)", "def _read_cache_file(self) -> bytes:\n with open(self.cache_file, 'rb') as file:\n return file.read()", "def get_object_contents(key):\n if key is None or key == \"\":\n r = jsonify(message=\"Not all required params are present\", success=False, status_code=400)\n r.status_code = 400\n return r\n\n contents = cache_utils.get(key)\n return Response(contents)", "def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []", "def read_result(self, key: str) -> CachedResult:\n with self._mem_cache_lock:\n if key not in self._mem_cache:\n # key does not exist in cache.\n raise CacheKeyNotFoundError()\n\n multi_results: MultiCacheResults = self._mem_cache[key]\n\n ctx = get_script_run_ctx()\n if not ctx:\n # ScriptRunCtx does not exist (we're probably running in \"raw\" mode).\n raise CacheKeyNotFoundError()\n\n widget_key = multi_results.get_current_widget_key(ctx, CacheType.RESOURCE)\n if widget_key not in multi_results.results:\n # widget_key does not exist in cache (this combination of widgets hasn't been\n # seen for the value_key yet).\n raise CacheKeyNotFoundError()\n\n result = multi_results.results[widget_key]\n\n if self.validate is not None and not self.validate(result.value):\n # Validate failed: delete the entry and raise an error.\n del multi_results.results[widget_key]\n raise CacheKeyNotFoundError()\n\n return result", "def check_artifact_cache(self, vts):\r\n return self.do_check_artifact_cache(vts)", "def get(self, key):\n if key is None or key not in self.cache_data.keys():\n return\n self.count += 1\n self.key_tracker.update({key: self.count})\n return self.cache_data.get(key)", "def cached(self, key):\n return key in self._cache", "def get_json_from_cache(file_name):\n result = None\n path = clean_path(file_name)\n cached_file_name = get_cached_file_name(path)\n if os.path.exists(cached_file_name):\n time = os.path.getmtime(path)\n cached_time = os.path.getmtime(cached_file_name)\n if cached_time > time:\n try:\n source = open(cached_file_name, \"r\")\n try:\n result = json.load(source)\n except ValueError:\n pass\n source.close()\n except OSError:\n # Includes IOError\n pass\n return result", "def __getitem__(self, key):\n if self.contains(key):\n return self._cache[key][0]\n raise CacheMissException(key)", "def get_cache(feed_key):\n\n cache_file = CACHE_DIR / (feed_key + \".json\")\n\n with open(cache_file, \"r\") as file:\n entries = json.load(file)\n\n return entries", "def get( key ):\n if ACTIVE is False:\n return None\n \n global CACHE, STATS_MISSES, STATS_HITS\n \n \"\"\" Return a key stored in the python instance cache or a None if it has expired or it doesn't exist \"\"\"\n if key not in CACHE:\n STATS_MISSES += 1\n return None\n \n value, expiry = CACHE[key]\n current_timestamp = time.time()\n if expiry == None or current_timestamp < expiry:\n STATS_HITS += 1\n return value\n else:\n STATS_MISSES += 1\n delete( key )\n return None", "def fetch(self,key):\n try:\n return self.__content[key]\n except KeyError:\n return None", "def get(self, key):\n if key is None or key not in self.cache_data:\n return None\n\n # modify the time and change the next newer value\n self.timesKey[key] = self.time\n self.time += 1\n\n return self.cache_data[key]", "def get_cache(self, key, silent=False):\n value = 
self.get_local_cache(key)\n if value is not None:\n return value\n\n if self.cache is None:\n return None\n\n cache_key = key.cache_key\n try:\n value = self.cache.get(cache_key)\n except Exception:\n if not silent:\n logger.warn(CACHE_FETCH_ERR, key.name, extra={\n 'key': key.name,\n }, exc_info=True)\n value = None\n\n if value is not None and key.ttl > 0:\n self._local_cache[cache_key] = _make_cache_value(key, value)\n\n return value", "def read_cached_file(self, path):\n if self.config.get('do_caching', False):\n ext = path.split('.')[-1]\n\n if ext == 'cache':\n with open(path, 'r') as fd:\n try:\n return fd.read()\n except UnicodeDecodeError as e:\n self.logger.warning(str(e))\n else:\n raise Exception('\"{}\" is a invalid cache file.'.format(path))", "def get(self, key, lock):\n raise NotImplementedError()", "def memcache_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"memcache_version\")", "def get(self, key):\n if self._is_expired():\n send_forwarder_internal_metrics(\"local_cache_expired\")\n logger.debug(\"Local cache expired, fetching cache from S3\")\n self._refresh()\n\n function_tags = self.tags_by_id.get(key, [])\n return function_tags", "def loadGameFromCache(self, theKey):\n theGameFile = File(self.theCacheDirectory, theKey + \".zip\")\n theLine = None\n try:\n theLine = br.readLine()\n br.close()\n ir.close()\n gIn.close()\n fIn.close()\n except Exception as e:\n if theLine == None:\n return None\n return Game.loadFromJSON(theLine)", "def get(self, identifier):\n cache_file_path = self._get_cache_file_path(identifier)\n\n if os.path.isfile(cache_file_path):\n with open(cache_file_path, 'rb') as fp:\n result = pickle.load(fp)\n return result\n\n return None", "def redis_cache(key, query, ttl=30):\n print 'checking redis cache'\n if r.get(key):\n print 'returning data found in cache'\n return r.get(key)\n else:\n print 'retrieving and caching new query results'\n results = cleanup_queries(query())\n js = json.dumps(results)\n r.set(key, js)\n r.expire(key, ttl)\n return js", "def _read_cache(self, path):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n if os.path.exists(cache_path):\n with io.open(cache_path, encoding='utf-8') as f:\n text = f.read()\n\n return text\n\n msg = ('Unable to download remote file \"{0}\" and local cache is not '\n 'available.').format(path)\n raise RuntimeError(msg)", "def getCacheContents(self):\n return self._cache", "def cache_key(self):\r\n statinfo = os.stat(self.pathname)\r\n return (self.filename + str(statinfo.st_mtime)).encode('ascii', 'ignore')", "def __getitem__(self, key):\n return self.__contents.get(key)", "def get_from_redis(self, key: str):\n data = self.redis_client.hget(self.root_path, key)\n if data:\n return data.decode()\n return None", "def get_cache(self, key, default=None):\n batch_number, value = self._cache.get(key, (None, None))\n if batch_number == self.model.batch_number:\n return value\n else:\n return default", "def cache_get(item: str) -> object:\n\titem = str(item)\n\tcache = cache_find(item)\n\n\t# cache_find() will return none if the cache does not exist\n\t# the returned location is guaranteed to exist, so no point checking again.\n\n\tif cache is not None:\n\t\ttry:\n\t\t\tcached = pickle.load(open(cache, \"rb\"))\n\t\texcept EOFError as ex:\n\t\t\t# Cache file is corrupted, so print an error and act like it does\n\t\t\t# not exist. 
We do not delete the cache file incase the user wants\n\t\t\t# to recover the file.\n\t\t\tuux.show_error(\"Error when loading file from cache: \" + str(ex))\n\t\t\treturn None\n\t\texcept Exception as ex:\n\t\t\traise ex\n\t\tuux.show_debug(\"Cache hit for \" + item)\n\t\treturn cached\n\n\treturn None", "def get(self, key):\n if type(key) != str:\n raise TypeError(\"This is not the string you're looking for!\")\n number = self._hash(key)\n stored_key = number if self.function == 'fnv' else key\n try:\n return self.bucket_list[number % self.bucket_number].search(stored_key).stored_value\n except AttributeError:\n return None", "def get(self, key):\n value = self.driftwood.cache.download(\"DB:\"+key) # Check if the value is cached.\n\n if not value:\n value = self.__scaffydb.get(key)\n\n if not value:\n self.driftwood.log.msg(\"ERROR\", \"Database\", \"no such key\", \"\\\"{0}\\\"\".format(key))\n return None\n\n self.driftwood.cache.upload(\"DB:\"+key, value) # Cache the value.\n self.driftwood.log.info(\"Database\", \"get\", \"\\\"{0}\\\"\".format(key))\n return value", "def updated(bank, key):\n c_key = f\"{bank}/{key}{_tstamp_suffix}\"\n try:\n _, value = api.kv.get(c_key)\n if value is None:\n return None\n return salt.payload.loads(value[\"Value\"])\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(f\"There was an error reading the key, {c_key}: {exc}\")", "def get_cache(self, key, coordinates=None):\n\n try:\n self.definition\n except NodeDefinitionError as e:\n raise NodeException(\"Cache unavailable, %s (key='%s')\" % (e.args[0], key))\n\n if self.cache_ctrl is None or not self.has_cache(key, coordinates=coordinates):\n raise NodeException(\"cached data not found for key '%s' and coordinates %s\" % (key, coordinates))\n\n return self.cache_ctrl.get(self, key, coordinates=coordinates)", "def __getitem__(self, key):\n if self._cache is None:\n self._cache = self.storer.read()\n\n try:\n return self._cache[key].values\n except KeyError:\n return self._get_constant_array(key)", "def _read_cache_directory(key: str) -> Union[str, None]:\n with open(\n os.path.join(CacheManagerSingleton.CACHE_PATH, \"directory.json\"), \"r\"\n ) as directory_file:\n directory_json = json.loads(directory_file.read())\n directory_file.close()\n loaded = True\n if loaded:\n return directory_json.get(key)\n else:\n return None", "def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result", "def _GetCachedFileByPath(self, safe_key_path):\n longest_key_path_prefix = u''\n longest_key_path_prefix_length = len(longest_key_path_prefix)\n for key_path_prefix in self._registry_files.iterkeys():\n if safe_key_path.startswith(key_path_prefix):\n key_path_prefix_length = len(key_path_prefix)\n if key_path_prefix_length > longest_key_path_prefix_length:\n longest_key_path_prefix = key_path_prefix\n longest_key_path_prefix_length = key_path_prefix_length\n\n if not longest_key_path_prefix:\n return None, None\n\n registry_file = self._registry_files.get(longest_key_path_prefix, None)\n return longest_key_path_prefix, registry_file", "def test_get_versions_cached_expired_found(self):\n versions = {\"foo-1.0.tar.gz\": \"/remote/packages/foo-1.0.tar.gz?base=http%3A%2F%2Fpypi.python.org\"} # noqa\n\n HTML = dedent(\"\"\"\\\n <html>\n <body>\n <a href=\"../../packages/foo-1.0.tar.gz\"/>foo-1.0.tar.gz</a>\n </body>\n </html>\"\"\")\n\n ok_(not self.app.redis.exists(self.index._key(\"foo\")))\n self.index._save_index(\"foo\", versions)\n with 
patch.object(self.index, \"_is_expired\", lambda ttl: True):\n with patch(\"cheddar.index.remote.get\") as mocked:\n mocked.return_value = MagicMock()\n mocked.return_value.status_code = codes.ok\n mocked.return_value.headers = {\"content-type\": \"text/html\"}\n mocked.return_value.text = HTML\n result = self.index.get_versions(\"foo\")\n eq_(result, versions)\n eq_(mocked.call_count, 1)\n ok_(self.app.redis.exists(self.index._key(\"foo\")))", "def read_data_cache(self):\n if os.path.exists(self.cache_filename):\n return self.read_data_cache_file()\n else:\n data = self._empty_data()\n self.write_data_cache(data)\n return data", "def _get(self, key: bytes, can_be_prefix=False, must_be_fresh=False) -> bytes:\n if not can_be_prefix:\n record = self.db.get(key)\n if record == None:\n return None\n value, expire_time_ms = pickle.loads(record)\n if not must_be_fresh or expire_time_ms != None and expire_time_ms > int(time.time() * 1000):\n return value\n else:\n return None\n else:\n for _, v_e in self.db.iterator(prefix=key):\n value, expire_time_ms = pickle.loads(v_e)\n if not must_be_fresh or expire_time_ms != None and expire_time_ms > self.time_ms():\n return value\n return None", "def cache_key(self):", "def get(key, fxn_load):\n CacheLayers.fill_request_cache()\n\n if not request_cache.cache.get(key):\n request_cache.cache[key] = fxn_load()\n\n return request_cache.cache[key]", "def get_cache(name):\n\n return get_component(CachingPackage.COMPONENT_NAME).get_cache(name)", "def get_cached(self, keyword, search_engine, scrapemode, page_number):\n if self.config.get('do_caching', False):\n file_name = self.cached_file_name(\n keyword,\n search_engine,\n scrapemode,\n page_number\n )\n cache_dir = self.config.get('cachedir', self.CACHEDIR)\n if file_name in os.listdir(cache_dir):\n try:\n modtime = os.path.getmtime(\n os.path.join(cache_dir, file_name)\n )\n except FileNotFoundError:\n return False\n modtime = (time.time() - modtime) / 60 / 60\n if (modtime > int(self.config('clean_cache_after', 48))):\n return False\n path = os.path.join(cache_dir, file_name)\n return self.read_cached_file(path)\n else:\n return False", "def c_code_cache_version(self):\r\n return ()", "def c_code_cache_version(self):\r\n return ()", "def _read_version(rootdir: Path) -> Union[str, None]:\n version_file = rootdir.joinpath(_METADATA)\n if version_file.exists():\n with version_file.open(\"r\") as f:\n content = json.load(f)\n if _VERSION_KEY in content:\n return content[_VERSION_KEY]\n return None", "def get(self, key):\n try:\n\n item = self._item_to_dict(self.client.get_item(**self._prepare_get_request(str(key))))\n\n # If item is empty, nothing in cache\n if not item:\n return None\n\n # If current time beyond expiry, nothing to return\n if time()>float(item[self._expiry_field.name]):\n return None\n\n return self.load_object(b64decode(item.get(self._value_field.name)))\n\n except Exception as e:\n logging.info('Error getting object from DynamoDB table %s (%s): %s',self.table_name,e.__class__.__name__,e)\n return None", "def _cache_get(self, metric_name):\n encoded_metric_name = self._encode(metric_name)\n with self.__env.begin(self.__metric_to_metadata_db, write=False) as txn:\n payload = txn.get(encoded_metric_name)\n\n if payload == self._EMPTY:\n return None, True\n\n if payload is not None:\n payload = self._decode(payload)\n\n if not payload:\n # cache miss\n return None, False\n\n # found something in the cache\n split = self.__split_payload(payload)\n\n if split is None:\n # invalid string => evict 
from cache\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n txn.delete(key=encoded_metric_name)\n return None, False\n\n # valid value => get id and metadata string\n # TODO: optimization: id is a UUID (known length)\n id_str, metadata_str, timestamp = split\n try:\n id = uuid.UUID(id_str)\n except Exception as e:\n logging.debug(str(e))\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n txn.delete(key=encoded_metric_name)\n return None, False\n\n # if the timestamp expired evict it in order to force\n # its recreation for the next time\n if self.__expired_timestamp(timestamp):\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n txn.delete(key=encoded_metric_name)\n\n metadata = self.metadata_from_str(metadata_str)\n return bg_metric.Metric(metric_name, id, metadata), True", "def get(self, key):\n if key is None or key not in self.cache_data:\n return None\n else:\n for x in self.LFU:\n if x.key == key:\n x.age = 0\n x.freq += 1\n else:\n x.age += 1\n return self.cache_data[key]", "def get_persistent_cache(self, key, default=None):\n return self._persistent_cache.get(key, default)", "def cache_file(cache_key):\n\n return MASTOOLS_DIR / f\"{cache_key}_cache.json\"", "def c_code_cache_version_apply(self, node):\r\n return self.c_code_cache_version()", "def get_cache(self):\n return self.cache", "def cache_get(key, default=None):\n mc = get_cache_client()\n try:\n return decode_value(mc.get(get_key(key))) or default\n except:\n return default", "def get(self, key):\n node = self.head\n value = None\n exists = False\n while node: # Loop through nodes, looking for key\n if node.key == key:\n exists = True\n break\n\n if exists:\n if node is self.head:\n value = node.value\n else:\n self.delete(node)\n\n new_node = CacheNode(key, value)\n self.length += 1\n\n return value", "def __getitem__(self, key: Hashable) -> Any:\n return self.contents[key]", "def cache_body(self):\n with open(self.path, \"rb\") as fh:\n fh.seek(fh.tell(), os.SEEK_END)\n fh.seek(max(0, fh.tell()-LEN_CACHE_BYTES), os.SEEK_SET)\n return fh.read(LEN_CACHE_BYTES).decode('utf-8') #.split(\"\\n\")", "def get(self, key: str) -> str:\n hashkey = self._gethash(key)\n if type(self.HashMap[hashkey]) is list:\n if len(self.HashMap[hashkey]) > 2:\n \"\"\"\n Return correct Key and value from the\n location which has a hashclash\n \"\"\"\n idx = self._find_if_hashclash(key, hashkey, \"v\")\n if idx is not None:\n return self.HashMap[hashkey][idx]\n elif self.HashMap[hashkey][0] == key:\n # Check that the data matches the key and return it if it does\n return self.HashMap[hashkey][1]\n return \"\"", "def memcacheGetRecord(self, key):\n\n pickled = self.memcacheGet(key)\n return self.unpickleRecord(pickled) if pickled is not None else None", "def get(self, key):\n return self._store.get(key, None)", "def get(self, key):\n return self._store.get(key, None)" ]
[ "0.7357667", "0.717419", "0.7155493", "0.7136998", "0.6948229", "0.69267094", "0.68619025", "0.68619025", "0.68404466", "0.68349254", "0.68165344", "0.6543805", "0.65131605", "0.64544433", "0.638806", "0.63734585", "0.63579035", "0.63579035", "0.6357669", "0.63486564", "0.6345596", "0.6235852", "0.62349945", "0.6217064", "0.62062985", "0.62022203", "0.6200547", "0.6193184", "0.6153688", "0.61511785", "0.6145706", "0.61418664", "0.61350054", "0.6129076", "0.61093587", "0.60928583", "0.6087546", "0.60873556", "0.6052649", "0.60447145", "0.6008029", "0.5997265", "0.5995263", "0.59862876", "0.59698915", "0.5948349", "0.59387726", "0.5934316", "0.5903405", "0.5902553", "0.58882385", "0.5886294", "0.5876012", "0.5855223", "0.5852995", "0.5850174", "0.58370775", "0.5836519", "0.58363944", "0.5809782", "0.5806651", "0.57916385", "0.5760406", "0.5750344", "0.57440364", "0.57407814", "0.5734076", "0.5720381", "0.56988066", "0.56906694", "0.56844044", "0.5674883", "0.5668678", "0.5653393", "0.5646582", "0.5645583", "0.56450784", "0.5644847", "0.563661", "0.563234", "0.56311494", "0.56130373", "0.559415", "0.559415", "0.5587693", "0.5587647", "0.5585059", "0.55817854", "0.5578097", "0.5575946", "0.5571153", "0.5557919", "0.55406225", "0.5540465", "0.5536756", "0.5524725", "0.55203885", "0.5517384", "0.5516383", "0.5516383" ]
0.69202965
6
Write the data to the cache for the key.
def save_cache(cache_key, version, data):
    # Save these results for the next run. Include the version information and nest the user
    # information inside a "users" key from the start, because experience says if we don't do this
    # then the next release will add a feature that requires a change in the data layout, and then
    # we'll have to write a data migration or something.
    cache_data = {cache_key: data, "version": version}
    cache_file(cache_key).write_text(json.dumps(cache_data, indent=2))
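A round-trip sketch under the same assumptions as the earlier example (save_cache, load_cache and the assumed cache_file helper in scope); it shows the version check rejecting a bumped version when the cache file has not been migrated.

# Save under version 1, then read it back.
save_cache("users", version=1, data={"alice": {"id": 1}})
assert load_cache("users", version=1) == {"alice": {"id": 1}}

# Bumping the expected version without rewriting the cache file raises,
# because load_cache compares the stored "version" field first.
try:
    load_cache("users", version=2)
except ValueError as err:
    print(err)   # Unknown users version number: expected 2, got 1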
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _put(self, key, data):\n path = self._get_key_path(key)\n with open(path, \"wb\") as pickle_file:\n pickle.dump(data, pickle_file)", "def update(self, cache_key):\r\n self._write_sha(cache_key)", "def write_data_cache(self, data):\n assert data, 'Must input a non-empty dictionary.'\n with open(self.cache_filename, 'w') as file_cache:\n json.dump(data, file_cache, sort_keys=True, indent=4, ensure_ascii=False)\n self.data = data # must assign the new data or risk problems", "def write_to_cache(self, data, filename):\n json_data = self.json_format_dict(data, True)\n cache = open(filename, 'w')\n cache.write(json_data)\n cache.close()", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def save_to_cache(self):\n\n ckey = self.cache_key\n\n logger.debug(f\"Saving setting '{ckey}' to cache\")\n\n try:\n cache.set(\n ckey,\n self,\n timeout=3600\n )\n except TypeError:\n # Some characters cause issues with caching; ignore and move on\n pass", "def StoreOrUpdateInCache(self, filename, data):\n try:\n if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):\n memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)\n except (ValueError), err:\n logging.warning('Data size too large to cache\\n%s' % err)", "def StoreOrUpdateInCache(self, filename, data):\n try:\n if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):\n memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)\n except (ValueError), err:\n logging.warning('Data size too large to cache\\n%s' % err)", "def cache_set(self, key: str, value: bytes) -> None:\n if self.cache is not None:\n self.cache.set(key, value)", "def store(self, key, value):\n self._cache[key] = value", "def put(self, key, item):\n if key is None or item is None:\n return\n self.cache_data[key] = item", "def put_cache(self, data, key, coordinates=None, expires=None, overwrite=True):\n\n try:\n self.definition\n except NodeDefinitionError as e:\n raise NodeException(\"Cache unavailable, %s (key='%s')\" % (e.args[0], key))\n\n if self.cache_ctrl is None:\n return\n\n if not overwrite and self.has_cache(key, coordinates=coordinates):\n raise NodeException(\"Cached data already exists for key '%s' and coordinates %s\" % (key, coordinates))\n\n with thread_manager.cache_lock:\n self.cache_ctrl.put(self, data, key, coordinates=coordinates, expires=expires, update=overwrite)", "def write(self, path, key):\n raise NotImplementedError", "def write_cache(self, write_cache: SmartSsdReadLookahead):\n\n self._write_cache = write_cache", "def _write_cache_file(self, data):\n\n with open(self.cache_file, mode='wb') as f:\n f.write(data)\n\n self.log.info(f\"Cached facilities at {self.cache_file}\")", "def set_to_cache(self, url, data):\n cache_key, cache_lookup = self.get_cacheable_info(url)\n MEM_CACHE[cache_key][cache_lookup] = (data, time.time())", "def put(self, key, item):\n if key or item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.last))\n del self.cache_data[self.last]\n self.last = key", "def write_to_cache(self):\n data = {'data': self.data, 'inventory': self.inventory}\n json_data = json.dumps(data, indent=2)\n\n with open(self.cache_filename, 'w') as cache:\n cache.write(json_data)", "def set(self, key, value):\n key = self.prefix + key\n if self.enabled:\n if self.redis:\n if value is not None:\n self.redis.set (key, self.serializer.dumps (value))\n 
self.cache[key] = value\n else:\n path = os.path.join (self.cache_path, key)\n with open(path, 'wb') as stream:\n stream.write (self.serializer.dumps (value))\n self.cache[key] = value", "def memcacheSet(self, key, value):\n\n key = base64.b64encode(key)\n if not self._getMemcacheClient().set(key, value, time=self._cacheTimeout):\n log.error(\"Could not write to memcache, retrying\")\n if not self._getMemcacheClient(refresh=True).set(\n key, value,\n time=self._cacheTimeout\n ):\n log.error(\"Could not write to memcache again, giving up\")\n del self.memcacheClient\n raise DirectoryMemcacheError(\"Failed to write to memcache\")", "def set_cache(self, key, value):\n self.r.set(key, value)\n self.r.expire(key, time=1500)", "def write(self, key, value):\n if( self.storage is None ): \n self.storage = {};\n if( value is None ):\n self.storage[ key ] = None;\n del self.storage[ key ];\n else:\n self.storage[ key ] = value;", "def put(self, key, item):\n if key is not None and item is not None:\n self.cache_data[key] = item\n if key not in self.cache_list:\n self.cache_list.append(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n popped_key = self.cache_list.pop(0)\n print(f\"DISCARD: {popped_key}\")\n del self.cache_data[popped_key]", "def put(self, key, item):\n raise NotImplementedError(\"put must be implemented in your cache class\")", "def set(self, key, value):\n # Initialize key variables\n result = self.cache.set(key, value)\n\n # Return\n return result", "def write_cache(self, filename=None):\n with open(self._cache_filename(filename), \"wb\") as fh:\n pickle.dump(self, fh)", "def cache_data(name, data):\n cache_path = get_cachefile('%s.cache' % name)\n with open(cache_path, 'wb') as f:\n pickle.dump(data, f)", "def writeData(self, key, value, context):\n print(\"key_{}\".format(key))\n print(\"val_{}\".format(value))\n\n address = _make_benchcontract_address(\"key_{}\".format(key))\n print(\"address: {}\".format(address))\n value_encoded = (\"{}\".format(value)).encode()\n print(\"encoded value: {}\".format(value_encoded))\n context.set_state(\n {address: value_encoded},\n timeout=self.timeout)\n print(\"writeData stored {} --> {} to state\".format(key, value))\n return 0", "def __setitem__(self, key, item):\n with self.__lock:\n cache_entry = CacheEntry(item, self._default_duration)\n log.debug(\"__setitem__: {}\".format(cache_entry))\n self._d[key] = cache_entry", "def save_cache(self, data, URL):\n\n cache_file = self.get_cache_file_path(URL)\n\n if cache_file.parent.exists():\n with open(cache_file, 'wb') as f:\n f.write(data)\n else:\n os.mkdir(cache_file.parent)\n with open(cache_file, 'wb') as f:\n f.write(data)", "def put(self, key, value):\n if key is None or value is None:\n raise TypeError\n\n index = self.__get_cache_set_index(key)\n cache_set = self.cache_sets[index]\n h_key = self.__ensure_hashable_key(key)\n cache_set.put(h_key, value)", "def put(self, key, item):\n if key is None or item is None:\n return\n if key in self.key_tracker.keys():\n self.key_tracker.pop(key)\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = self.most_recent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: self.count})\n self.count += 1", "def save(self, data, identifier):\n cache_file_path = self._get_cache_file_path(identifier)\n\n # Create path directory\n if not os.path.isdir(self.cache_path):\n logging.info(\"Creating cache directory at 
{}\".format(self.cache_path))\n mkpath(self.cache_path, 0o755)\n\n with open(cache_file_path, 'wb') as fp:\n logging.debug(\"Storing result in cache file at {}\".format(cache_file_path))\n pickle.dump(data, fp)\n\n return True", "def _cache_set(self, metric_name, metric):\n encoded_metric_name = self._encode(metric_name)\n key = encoded_metric_name\n value = self.__value_from_metric(metric)\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n txn.put(key, value, dupdata=False, overwrite=True)", "def store(bank, key, data):\n c_key = f\"{bank}/{key}\"\n tstamp_key = f\"{bank}/{key}{_tstamp_suffix}\"\n\n try:\n c_data = salt.payload.dumps(data)\n api.kv.put(c_key, c_data)\n api.kv.put(tstamp_key, salt.payload.dumps(int(time.time())))\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(f\"There was an error writing the key, {c_key}: {exc}\")", "def _mock_write_cache(self, pipeline, values, cache_key):\n labels = ['full', cache_key]\n\n # Usually, the pcoder will be inferred from `pcoll.element_type`\n pcoder = coders.registry.get_coder(object)\n cache_manager = ie.current_env().get_cache_manager(pipeline)\n cache_manager.save_pcoder(pcoder, *labels)\n cache_manager.write(values, *labels)", "def set(self, key, value):\n if key in self.keys:\n self.remove(key)\n self.keys[key] = None\n self.buffered[key] = value\n self.cache[key] = value\n if self.autocommit:\n commit()", "def store(self, key, value):\n pass", "def saveCacheFile(self):\n with open(self.cachePath, 'w', encoding='utf-8') as outfile:\n json.dump(self.cacheData, outfile)", "def set(key, value):\n return Cache.cache_connector.set(key, value)", "def put(self, key, item):\n if key and item:\n if key in self.cache_data.keys():\n self.cache_data[key] = item\n self.stack.append(key)\n else:\n if len(self.cache_data.keys()) < self.MAX_ITEMS:\n self.cache_data[key] = item\n self.stack.append(key)\n elif len(self.cache_data.keys()) == self.MAX_ITEMS:\n k = self.stack.pop(-1)\n self.cache_data.pop(k)\n print(\"DISCARD: {}\".format(k))\n self.cache_data[key] = item\n self.stack.append(key)", "def set_cache(self, key, value):\n self._cache[key] = (self.model.batch_number, value)", "def store(self, key, headers, value):", "def write_to_cache(self):\n return False", "def write_cache(self):\n self.__config.open_file(\n self.__cache_file, \"w\", lambda f: json.dump(self.cache, f)\n )\n self.__dirty = False", "def save(self, key, value):\n # deepcopy so that later modifications to value aren't reflected in the db\n self.data[key] = copy.deepcopy(value)", "def write_cache(feed):\n if ARGV.get(NOCACHE_OPT):\n return\n CACHE['feed'] = feed\n CACHE['last-request'] = str(time.time())\n CACHE['max-age'] = feed.headers['Cache-Control'].split('=')[1]\n save_datfile()", "def put(data):", "def put(self, key, value):\n return self.sp.put(key, value)", "def _append_to_cache_directory(key: str, value: str) -> None:\n directory_json_path = os.path.join(\n CacheManagerSingleton.CACHE_PATH, \"directory.json\"\n )\n with open(directory_json_path, \"r\") as directory_file_read:\n directory_json = json.loads(directory_file_read.read())\n directory_file_read.close()\n directory_json[key] = value\n new_directory_json = json.dumps(directory_json, indent=2)\n with open(directory_json_path, \"w\") as directory_file_write:\n directory_file_write.write(new_directory_json)\n directory_file_write.close()", "def put(self, key, value):\n ret = self.__scaffydb.put(key, value)\n\n if not ret:\n self.driftwood.log.msg(\"ERROR\", 
\"Database\", \"could not assign value to key\", \"\\\"{0}\\\"\".format(key))\n return False\n\n self.driftwood.cache.upload(\"DB:\"+key, value) # Cache the value.\n self.driftwood.log.info(\"Database\", \"put\", \"\\\"{0}\\\"\".format(key))\n return True", "def put_property_cache(self, data, key, coordinates=None, expires=None, overwrite=True):\n\n try:\n self.definition\n except NodeDefinitionError as e:\n raise NodeException(\"Cache unavailable, %s (key='%s')\" % (e.args[0], key))\n\n if self.property_cache_ctrl is None:\n return\n\n if not overwrite and self.has_property_cache(key, coordinates=coordinates):\n raise NodeException(\"Cached data already exists for key '%s' and coordinates %s\" % (key, coordinates))\n\n with thread_manager.cache_lock:\n self.property_cache_ctrl.put(self, data, key, coordinates=coordinates, expires=expires, update=overwrite)", "def set( key, value, expiry = DEFAULT_CACHING_TIME ):\n if ACTIVE is False:\n return None\n \n global CACHE, STATS_KEYS_COUNT\n if key not in CACHE:\n STATS_KEYS_COUNT += 1\n if expiry != None:\n expiry = time.time() + int( expiry )\n \n try:\n CACHE[key] = ( value, expiry )\n except MemoryError:\n \"\"\" It doesn't seems to catch the exception, something in the GAE's python runtime probably \"\"\"\n logging.info( \"%s memory error setting key '%s'\" % ( __name__, key ) )", "def save_data(self):\n with open(self.storage_path, 'w') as cache_file:\n json.dump(self.data, cache_file)", "def put(self, key, item):\n if key is not None and item is not None:\n # modify the time and change the next newer value\n self.timesKey[key] = self.time\n self.time += 1\n\n # add the new item\n self.cache_data[key] = item\n\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discard_key = None\n newer = self.time - 2\n\n for _key, _value in self.timesKey.items():\n if newer == _value:\n discard_key = _key\n break\n\n # del key in time and cache data\n del self.cache_data[discard_key]\n del self.timesKey[discard_key]\n\n print(\"DISCARD: {}\".format(discard_key))", "async def save_to_cache(self, item: T):\n path = self._build_cache_path(\n **{a: getattr(item, a) for a in self._unique_attribues}\n )\n if path.is_file():\n raise ValueError(f\"Trying to overwrite cache at {str(path)}\")\n path.parent.mkdir(parents=True, exist_ok=True)\n async with aiofiles.open(str(path), \"w\") as file:\n await file.write(item.to_json())", "def saveGameToCache(self, theKey, theGame):\n if theGame == None:\n return\n theGameFile = File(self.theCacheDirectory, theKey + \".zip\")\n try:\n theGameFile.createNewFile()\n pw.print_(theGame.serializeToJSON())\n pw.flush()\n pw.close()\n gOut.close()\n fOut.close()\n except Exception as e:\n e.printStackTrace()", "async def put(self, collection, key, data):\n _LOGGER.debug(\"Putting %s to memory\", collection, key)\n if self.databases:\n for database in self.databases:\n await database.put(collection, key, data)", "def __write_cache(self, fileName, returnVal):\n # Cache miss\n if self.__log:\n self.__logger.info(f\"Cache miss: {fileName}\")\n self.__handle_cache_size()\n\n with open(fileName, \"wb\") as f:\n packed = pickle.dumps(returnVal)\n final = self.__handle_compression(packed)\n f.write(final)\n\n node = os.path.relpath(fileName, \"cache\")\n self.__recentAccessed.insert(0, node)", "def put_object(self, bucket_name, key, data):\n url = self.__key_url(bucket_name, key)\n resp = self.infinispan_client.put(url, data=data,\n auth=self.basicAuth,\n headers=self.headers)\n logger.debug(resp)", "def __setitem__(self, key, 
value):\n if not isinstance(value, dict):\n raise TypeError(\"value must be a dict\")\n\n # Is this a valid cache entry dictionary?\n try:\n validate(value, ENTRY_SCHEMA)\n except ValidationError as e:\n raise ValueError(\"%s is not a valid entry\" % value) from e\n\n entry_dir = self.cache_key_dir(key)\n\n try:\n entry_dir.mkdir(parents=True, exist_ok=True)\n except FileExistsError as e:\n raise ValueError(\"Already exists\") from e\n\n with open(entry_dir / \"entry.yaml\", \"w\") as f:\n f.write(yaml.safe_dump(value))", "async def put(self, key, data):\n data = self.convert_object_to_timestamp(data)\n await self.client.set(key, json.dumps(data))", "def _put(self, key: bytes, value: bytes, expire_time_ms: int=None):\n self.db.put(key, pickle.dumps((value, expire_time_ms)))", "def _write_cache(self, path, text):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n folder = os.path.split(cache_path)[0]\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n with io.open(cache_path, 'w', encoding='utf-8') as f:\n f.write(text)", "def write(self, key, value, ttl=None):\n data = {\n 'value': value\n }\n if ttl and ttl > 0:\n data['ttl'] = int(ttl)\n return self._request_key(key, method='put', data=data)", "def _store_cache(self):\n assert self._already_generated, \"Must generate before storing to cache\"\n\n if self.variant_unit is not None:\n logger.warning(\"Cannot cache once variant_unit has been set\")\n return\n\n try:\n os.mkdir(os.path.dirname(self._cache_key))\n except FileExistsError:\n # Easier than checking and risking race conditions\n pass\n\n with open(self._cache_key, 'w') as f:\n json.dump(self.rows, f)\n\n logger.debug(\"Stored cache to {}\".format(self._cache_key))", "def set(self, key, value):\n # First, look for the key in the cache using `self.get()`\n # If not exists (returns None), add key-value to head\n # If exists, pop old key-value from list, add new value to head\n pass", "def __setitem__(self, tid: int, result: bytes):\n if tid in self:\n raise KeyError(f\"transaction {tid} already cached\")\n\n self._cache[tid] = (result, time.monotonic())", "def put(self, key, val):\n pass", "def __setitem__(self, index, value):\n\t\t# create value and key storage\n\t\tself.filep.write(pack('<LL',len(index), len(value)))\n\t\tself.filep.write(index)\n\t\tself.filep.write(value)\n\n\t\t# grab a hash for the key\n\t\thash = calc_hash(index)\n\n\t\t# dump a new hash into our bucket\n\t\tself.hashbucket[hash % 256].fromlist([hash, self.position_hash])\n\t\tself.position_hash += 8 + (len(index) + len(value))", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n if key in self.cache_data:\n self.LRU = [ci for ci in self.LRU if ci.key != key]\n\n # increase age of all items\n for x in self.LRU:\n x.age += 1\n\n self.cache_data[key] = item\n data = LRUCacheItem(key, item, 0)\n self.LRU.append(data)\n\n # Length is longer than max capacity, make room\n if len(self.cache_data) > self.MAX_ITEMS:\n discard = self.LRU[0]\n for x in self.LRU:\n if x.age > discard.age:\n discard = x\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LRU.remove(discard)", "def __setitem__(self, key, value):\n self.data[key] = value", "def __setitem__(self, key, value):\n self.data[key] = value", "def put(self, key, value):\n self._store[key] = value", "def cache(self, key, timeout=None):\n original_cache_timeout = self.cache_timeout\n self.cache_timeout = timeout\n 
self.cache_backend.set(key, self, timeout)\n self.cache_timeout = original_cache_timeout", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n # If Key exists, update Cache Item\n if key in self.cache_data:\n for ci in self.LFU:\n if ci.key is key:\n ci.updateItem(item)\n else:\n # Length will be longer than max capacity, make room\n if len(self.cache_data) == self.MAX_ITEMS:\n\n # Determine discarded cache item\n discard = self.LFU[0]\n for x in self.LFU:\n if x.freq < discard.freq or \\\n x.freq == discard.freq and x.age > discard.age:\n discard = x\n\n # Discard Cache Item\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LFU.remove(discard)\n\n # Add new Cache Item\n data = LFUCacheItem(key, item, 0, 0)\n self.LFU.append(data)\n\n # increase age of all items\n for x in self.LFU:\n x.age += 1\n\n self.cache_data[key] = item", "def set(self, key, data, expiration=None):\n node = self._get_node(key)\n\n if node:\n node.data = data\n else:\n if len(self) + 1 > self.max_size:\n node = self._pop()\n del self.map[node.key]\n\n node = Node(key, data, expiration)\n self._append(node)\n self.map[key] = node", "def write(self, data):\n\n # TODO - Check bytes vs unicode\n data = _as_bytes(data)\n # block_size = 2**16 = 65536\n data_len = len(data)\n if len(self._buffer) + data_len < 65536:\n # print(\"Cached %r\" % data)\n self._buffer += data\n return\n else:\n # print(\"Got %r, writing out some data...\" % data)\n self._buffer += data\n while len(self._buffer) >= 65536:\n self._write_block(self._buffer[:65536])\n self._buffer = self._buffer[65536:]", "def __setitem__(self, key, value):\r\n self.data[key] = value", "async def _set(self, key, value, ttl=None):\n SimpleMemoryBackend._cache[key] = value\n if ttl:\n loop = asyncio.get_event_loop()\n SimpleMemoryBackend._handlers[key] = loop.call_later(ttl, self.__delete, key)\n return True", "def store_cache(base_url, data, path=\"logs/\"):\n\n # Convert URL to filename and write html content into that file\n url_filename = url_to_filename(base_url)\n filename = f\"{path}CACHE-{url_filename}.html\"\n f = open(filename, \"w+\")\n f.write(data)\n f.close()", "def save_data(data, fn, key):\n\n data = xor(data, key)\n\n with open(fn, 'wb') as fh:\n fh.write(data)", "def _put(self, key: str, value):\n pass", "def set_persistent_cache(self, key, value):\n self._persistent_cache[key] = value", "def write(self, addr: str, data: str, state: str = 'E') -> None:\n found = False\n\n # Searching for the cache block\n for block in self.__mem:\n # Check if the block is valid and the memory address is\n # the correct\n if block['address'] == addr:\n block['data'] = data\n block['state'] = state\n found = True\n break\n\n # Check if the block was not in cache\n if not found:\n invalid = False\n\n # Searching for an invalid block\n for block in self.__mem:\n if block['state'] == 'I':\n # Set the data in the invalid block\n block['address'] = addr\n block['data'] = data\n block['state'] = state\n\n invalid = True\n break\n \n # If an invalid block does not exist\n if not invalid:\n # Get a random block\n block = randint(0, self.__size - 1)\n\n # Set the new information\n self.__mem[block] = {\n 'address': addr,\n 'available': Lock(),\n 'data': data,\n 'state': state\n }", "def save(self, key, data):\n overloads = self._load_index()\n try:\n # If key already exists, we will overwrite the file\n data_name = overloads[key]\n except KeyError:\n # Find an 
available name for the data file\n existing = set(overloads.values())\n for i in itertools.count(1):\n data_name = self._data_name(i)\n if data_name not in existing:\n break\n overloads[key] = data_name\n self._save_index(overloads)\n self._save_data(data_name, data)", "def _write_cache(url, j):\n\n m = hashlib.md5()\n m.update(url)\n with open('.cache.%s' % m.hexdigest(), 'wb') as outfile:\n json.dump(j, outfile)", "def put(self, key, value):\n hash_key = key % self.key_space\n self.hash_table[hash_key].update(key, value)", "def cache_item(self, index: int, data):\n # Unfortunately, we can't put tensors directly in mongo so we must\n # pickle them...\n data_to_store = io.BytesIO()\n torch.save(data, data_to_store)\n data_to_store.seek(0)\n\n self.mongo_database.cache.insert_one(\n {\n \"session_id\": self.session_id,\n \"sample_identifier\": index,\n \"sample\": data_to_store.read(),\n }\n )", "def __setitem__(self, key, value):\n self._data[key] = value", "def set(self, key, value):\n try:\n assert self.capacity > 0\n\n if key not in self.cache:\n node = Node(key, value)\n self.cache[key] = node\n\n self._enqueue(node)\n self.num_elements += 1\n\n if self._full_capacity():\n dequeued = self._dequeue()\n del self.cache[dequeued.key]\n self.num_elements -= 1\n\n else:\n # Overwrite value if the key already exists\n # and re-enqueued to indicate recently used\n self.cache[key].value = value\n self._re_enqueue(self.cache[key])\n except AssertionError as error:\n print(\"WARNING: Can't perform operations on <= 0 capacity cache\")", "def save_cache(self):\n with open(self.get_cache_filename(), 'wb+') as f:\n out = dict()\n out['timestamp'] = self.get_last_update()\n out['cache'] = self.cache\n f.write(pickle.dumps(out))", "def put(self, key, value):\n hashv = self.hash(key)\n bucket=self.hashmap[hashv]\n for i,(k,v) in enumerate(bucket):\n if k==key:\n bucket[i]=(key,value)\n return\n bucket.append((key,value))", "def save(self):\n if self._cache is not None:\n with open(self.cache_path, 'w') as cache_file:\n json.dump(self._cache, cache_file)", "def __setitem__(self, (essid, key), results):\n if essid not in self.essids:\n raise KeyError(\"ESSID not in store.\")\n filename = os.path.join(self.essids[essid][0], key) + '.pyr'\n with open(filename, 'wb') as f:\n f.write(PYR2_Buffer(essid, results).pack())\n self.essids[essid][1][key] = filename", "def store(self,key,start,end,data):\n\n pass", "def set(key, value):\n instance_cache.set(key, value, expiry=CacheLayers.INSTANCE_SECONDS)\n memcache.set(key, CacheLayers.compress(value))\n\n logging.info(\"Set BingoCache in instance cache and memcache\")", "def add_cache(self, key_path, content, t_mserver):\n\t\tif key_path not in self.cache:\n\t\t\tself.cache[key_path] = {'time_validated': int(time.time()),\n\t\t\t\t\t\t\t\t\t't_mclient' : int(t_mserver),\n\t\t\t\t\t\t\t\t\t'content': content}\t\n\t\t\tprint(\"self.cache[key_path]: \",self.cache[key_path]['content'])", "def write(self, record):\n if not record:\n return\n\n # Convert to a dict - inefficient, I know...\n if type(record) is DASRecord:\n record = json.loads(record.as_json())\n if type(record) is dict:\n # If our local queue is full, throw away the oldest entries\n while self.send_queue.full():\n try:\n logging.debug('CachedDataWriter queue full - dropping oldest...')\n self.send_queue.get_nowait()\n except asyncio.QueueEmpty:\n logging.warning('CachedDataWriter queue is both full and empty?!?')\n\n # Enqueue our latest record for send\n self.send_queue.put_nowait(record)\n else:\n 
logging.warning('CachedDataWriter got non-dict/DASRecord object of '\n 'type %s: %s', type(record), str(record))" ]
[ "0.75162756", "0.7494944", "0.73132676", "0.72090346", "0.7136881", "0.7136881", "0.7108822", "0.7058452", "0.7058452", "0.70342314", "0.7015688", "0.7011552", "0.6970053", "0.69598436", "0.6954975", "0.6933723", "0.6902636", "0.6839885", "0.6836937", "0.68292135", "0.6814033", "0.67881453", "0.6771964", "0.6759035", "0.67358303", "0.66819257", "0.6633104", "0.66150624", "0.660925", "0.65813124", "0.6548015", "0.65118563", "0.648328", "0.64727014", "0.6463079", "0.6458072", "0.6453789", "0.6445948", "0.6419249", "0.6417888", "0.64156455", "0.64004964", "0.63930756", "0.63466847", "0.6339767", "0.63318795", "0.6331534", "0.6330148", "0.63263834", "0.63230634", "0.6289197", "0.62876767", "0.6281859", "0.6281101", "0.6270022", "0.62595063", "0.6245062", "0.6242561", "0.6235103", "0.6230443", "0.6226365", "0.62216544", "0.62132835", "0.62122184", "0.6203286", "0.6202351", "0.6198333", "0.61964077", "0.619221", "0.6188978", "0.6184166", "0.6168465", "0.6164578", "0.6164578", "0.616452", "0.6163139", "0.6156821", "0.6156361", "0.6141681", "0.6132862", "0.6129319", "0.6127578", "0.6126614", "0.6110155", "0.60968006", "0.6084766", "0.6083166", "0.6081394", "0.6061589", "0.60569924", "0.60557073", "0.60500425", "0.6045303", "0.6019284", "0.6017033", "0.6015379", "0.60101527", "0.59985244", "0.5989439", "0.5984409" ]
0.682781
20
seed users. by default set to 5 users
def seed_User(number=5, overwrite=False):
    if overwrite:
        print('Overwriting all users')
        User.objects.all().delete()
    count = 0
    for i in range(number):
        username = fake.first_name()
        User.objects.create_user(
            email=username + "@blogmail.com",
            password="vns12345",
            name=username,
            date_joined=datetime.datetime.now(),
            is_active=1,
            is_superadmin=0,
            avatar='',
            is_staff=1
        )
        count += 1
        percent_complete = count / number * 100
        print(
            "Adding {} new Users: {:.2f}%".format(
                number, percent_complete),
            end='\r', flush=True
        )
    print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_users(count=10):\n for i in range(count):\n user = generate_random_user()\n db.session.add(user)\n db.session.commit()", "def setUp(self):\n self.users = [UserFactory.create() for i in range(5)]", "def create_users(N):\n for _ in range(N):\n name = fake.name()\n phone = fake.phone_number()\n email = fake.email()\n role = random.choice([\"shepherd\",\"admin\"])\n password = fake.user_name\n User.objects.create(\n name=name,phone=phone,\n email=email,role=role,\n password=password\n )", "def run_seed(self):\n user = User(username='administrator')\n user.set_password('123456123a')\n user.save()", "def populate(N=5):\n for entry in range(N):\n # Create the fake data for the entry\n fake_name = fakegen.name().split()\n fake_first_name = fake_name[0]\n fake_last_name = fake_name[1]\n fake_email = fakegen.email()\n\n # Create the new User entry\n user = User.objects.get_or_create(first_name=fake_first_name, last_name=fake_last_name, email=fake_email)[0]", "def insert_default_users():\n user1 = User(email=current_app.config['ADMIN_EMAIL'],\n password=current_app.config['ADMIN_PW'],\n first_name=current_app.config['ADMIN_FIRST_NAME'],\n last_name=current_app.config['ADMIN_LAST_NAME'],\n confirmed=True)\n user1.role = Role.query.filter_by(name='Administrator').first()\n db.session.add(user1)\n\n user2 = User(email=current_app.config['USERMANAGER_EMAIL'],\n password=current_app.config['USERMANAGER_PW'],\n first_name=current_app.config['USERMANAGER_FIRST_NAME'],\n last_name=current_app.config['USERMANAGER_LAST_NAME'],\n confirmed=True)\n user2.role = Role.query.filter_by(name='Usermanager').first()\n db.session.add(user2)\n\n user3 = User(email=current_app.config['USER_EMAIL'],\n password=current_app.config['USER_PW'],\n first_name=current_app.config['USER_FIRST_NAME'],\n last_name=current_app.config['USER_LAST_NAME'],\n confirmed=True)\n user3.role = Role.query.filter_by(name='User').first()\n db.session.add(user3)\n\n db.session.commit()", "def seed():\n u = User(email=\"[email protected]\", is_admin=False)\n u.set_password(\"foobar123\")\n\n db.session.add(u)\n db.session.commit()", "def _create_users(self):\r\n users = []\r\n for i in range(8):\r\n username = \"user{}\".format(i)\r\n email = \"test+user{}@edx.org\".format(i)\r\n user = User.objects.create_user(username, email, 'foo')\r\n user.is_active = True\r\n user.save()\r\n users.append(user)\r\n return users", "def setUpClass(cls):\n super(EmotionTest, cls).setUpClass()\n user = UserFactory(username='dan', email='[email protected]')\n user.set_password('password')\n user.first_name = 'Dan'\n user.last_name = 'Theman'\n user.save()\n cls.dan = user\n\n for _ in range(10):\n user = UserFactory.create()\n user.set_password(factory.Faker('password'))\n user.save()", "def user_batch():\n return [\n UserFactory(roles=RoleFactory.create_batch(randint(0, 3)))\n for _ in range(randint(3, 5))\n ]", "def seed_db():\n db.session.add(User(username='Joe', email='[email protected]'))\n db.session.add(User(username='Joe2', email='[email protected]'))\n db.session.commit()", "def load_users():\n\n \n\n User.query.delete()\n\n with open(\"seed_data/seed_users.psv\") as users:\n for row in users:\n username, fname, lname, email, password, user_role = row.strip().split(\"|\")\n\n user = User(username=username,\n fname=fname,\n lname=lname,\n email=email,\n password=generate_password_hash(password),\n user_role=user_role)\n\n db.session.add(user)\n\n db.session.commit()", "def load_users():\n\n print \"Users\"\n\n User.query.delete()\n\n for row 
in open(\"seed_data/u.user\"):\n row = row.rstrip()\n ID, password, name, first_entry_at = row.split(\"|\")\n first_entry_at = datetime.strptime(first_entry_at, \"%m-%d-%y\")\n\n user = User(ID=ID, password=password, name=name, first_entry_at=first_entry_at)\n\n db.session.add(user)\n\n db.session.commit()", "def setUp(self):\n self.new_users = User('Dennis', 'Kiplangat', 'kiplangat18')", "def seed_users(project_env, runlevel):\n\n db_client_maker = core_db.get_nest_users_sqla_maker()\n md = nest_db.get_global_sqlalchemy_metadata()\n engine = nest_db.get_global_sqlalchemy_engine()\n #note this is a tablelike client, not a NestUser client\n db_client = db_client_maker.get_db_client(engine, md)\n\n #needs a unique *instance* of system_user to act as 'owner' \n #as we will alter the instance that we add to the table\n db_client.set_requesting_user(core_db.get_system_user())\n\n user_configs = nest_config.generate_seed_users(project_env, runlevel)\n \n success = _add_users_from_configs(db_client, user_configs)\n return success", "def create_users(self):\n if self.gl is None:\n print(\"No config found, please run connect first.\")\n exit(1)\n else:\n print(\"Starting Users creation.\")\n gl = self.gl\n config = self.config\n for username in config[\"users\"]:\n i = 0\n count = int(config[\"users\"][username][\"count\"])\n pw = config[\"users\"][username][\"pass\"]\n groups = config[\"users\"][username][\"groups\"]\n while i < count:\n i += 1\n print(\"creating user: \" + username + '-' + str(i) + \" ...\", end=' ')\n user = gl.users.create({'email': username + str(i) + '@example.com',\n 'password': pw,\n 'username': username + '-' + str(i),\n 'name': username + '-' + str(i),\n 'skip_confirmation': True})\n self.users.append(user)\n self.usergroups[user.id] = groups\n print(\"done.\")\n print(\"All Users created!\")", "def setUp(self):\n users = []\n users.append(user.User(username=\"username\", name=\"name\", email=\"[email protected]\", password_hash=\"password_hash\", salt=\"salt\", profile_picture=b\"profile_picture\"))\n users.append(user.User(username=\"test\", password_hash=\"iiojfeaioieof\", salt=\"saltySalt\"))\n users.append(user.User(username=\"jeff\", name=\"jeff bob\", password_hash=\"eeeeeeeeeeeeeee\", salt=\"fffffffffffffff\"))\n users.append(user.User(username=\"epicUsername69\", email=\"[email protected]\", password_hash=\"asdfafeadf\", salt=\"graefgafae\"))\n db.create_all()\n for value in users:\n db.session.add(value)\n db.session.commit()", "def generate_fake(count=100, **kwargs):\n from sqlalchemy.exc import IntegrityError\n from random import seed, choice\n from faker import Faker\n\n fake = Faker()\n\n seed()\n for i in range(count):\n u = User(\n username=fake.first_name(),\n email=fake.email(),\n password='password',\n **kwargs)\n db.session.add(u)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()", "def create_users(self):\n from django.contrib.auth.models import User\n user = User.objects.create_user('red', '', 'red')\n user = User.objects.create_user('green', '', 'green')\n user = User.objects.create_user('blue', '', 'blue')", "def _create_and_enroll_users(self, count):\n users = []\n for _ in range(count):\n user = UserFactory()\n CourseEnrollmentFactory.create(user=user, course_id=self.course.id)\n users.append(user)\n return users", "def generate_fake(count=100, **kwargs):\n from sqlalchemy.exc import IntegrityError\n from random import seed, choice\n from faker import Faker\n\n fake = Faker()\n roles = Role.query.all()\n\n seed()\n for 
i in range(count):\n u = User(\n first_name=fake.first_name(),\n last_name=fake.last_name(),\n email=fake.email(),\n password='password',\n confirmed=True,\n role=choice(roles),\n **kwargs)\n db.session.add(u)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()", "def populate_database_with_users(\n db_session, extra_username: t.Optional[str] = None\n) -> None:\n for _ in range(0, 3):\n username = create_random_username()\n db_session.add(GifSyncUser(username=username))\n if extra_username:\n db_session.add(GifSyncUser(username=extra_username))\n db_session.commit()", "def setUp(self):\n\n # Allocates users\n self.users = []\n self.user_session_tokens = []\n\n # Template for creating users\n user_template = {\n \"clientId\": 2,\n \"username\": \"user\",\n \"pwd\": \"password\",\n \"nameLast\": \"Last\",\n \"nameFirst\": \"First\",\n \"email\": \"[email protected]\",\n \"phone\": \"123-4567\",\n \"profile_picture_path\": \"/\",\n \"timezoneDefault\": \"EST\",\n \"languageDefault\": \"English\"\n }\n\n # Creates 'n' users and stores them\n n = 3\n for i in range(0, n):\n user = deepcopy(user_template)\n user['username'] += randstr()\n user['email'] += randstr()\n handler.user_create(event=user, context=None)\n self.users.append(user)\n self.user_session_tokens.append(None)", "def seed_all():\n seed_client()\n seed_staff()\n seed_request()\n seed_comment()", "def setUp(self):\n self.new_users = User(\"Zephon Makale\", \"1234xyz\") #Create User object", "def load_users():\n\n for i, row in enumerate(open('seed_data/users.csv')):\n data = row.rstrip().split(\",\")\n user_id, email, password = data\n\n user = User(user_id=user_id, email=email,\n password=password)\n\n db.session.add(user)\n\n # For testing, just to see it was happening\n # if i % 100 == 0:\n # print i\n\n db.session.commit()", "def seed():\n if User.find_by_identity(app.config['SEED_ADMIN_EMAIL']) is not None:\n return None\n\n user = User(\n role = 'admin',\n email = app.config['SEED_ADMIN_EMAIL'],\n password = app.config['SEED_ADMIN_PASSWORD']\n )\n category = Category(\n name='Red Blend',\n description='',\n parent_id=0,\n owner=1\n )\n region = Region(\n name='Columbia Valley',\n description='',\n parent_id=0,\n country='United States',\n state='Washington',\n owner=1\n )\n wine = Wine(\n name='Test Wine',\n maker='Test Maker',\n vintage='2000',\n category=1,\n region=1,\n owner=1\n )\n\n db.session.add(user)\n db.session.commit()\n db.session.add(category)\n db.session.commit()\n db.session.add(region)\n db.session.commit()\n db.session.add(wine)\n db.session.commit()\n\n return user", "def create_db(num_users=5):\n db.create_all()", "def load_users():\n filepath = \"./seed_data/u.user\"\n users = open(filepath)\n\n\n for user in users:\n user = user.rstrip().split('|')\n db_user = User(user_id=user[0], age=user[1], zipcode=user[4])\n db.session.add(db_user)\n\n db.session.commit()", "def load_users():\n\n print \"Users\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n User.query.delete()\n\n # Read u.user file and insert data\n for row in open(\"seed_data/u.user\"):\n row = row.rstrip()\n user_id, age, gender, occupation, zipcode = row.split(\"|\")\n\n user = User(user_id=user_id,\n age=age,\n zipcode=zipcode)\n\n # We need to add to the session or it won't ever be stored\n db.session.add(user)\n\n # Once we're done, we should commit our work\n db.session.commit()", "def setUp(self):\n # ensure there is no data in the test 
database when the test starts\n db.session.commit()\n db.drop_all()\n db.create_all()\n usRoles = [\"Guest\",\"Couple\",\"2nd line\",\"Wedding party\"]\n\n for i in usRoles:\n roleAdd = User_roles(role = i)\n db.session.add(roleAdd)\n db.session.commit()\n\n # create test admin user\n admin = User(first_name=\"admin\", last_name=\"admin\",permission=\"Couple\", email=\"[email protected]\", password=\"admin2016\")\n\n # create test non-admin user\n employee = User(first_name=\"test\", last_name=\"user\",permission = \"Guest\", email=\"[email protected]\", password=\"test2016\")\n\n # save users to database\n db.session.add(admin)\n db.session.add(employee)\n db.session.commit()", "def setUp(self):\n with connection.cursor() as cursor:\n # Add some test users\n sql = 'INSERT INTO `'+ USER_TABLENAME +'` (`name`, `surname`, `password_hashed`, `e_mail`, `token`, `about_me`, `job_uuid`, `field_of_study`, `forget_password_ans`) VALUES'\n for test_user in DeleteTest.user_list:\n cursor.execute(sql + \"('\"+\"','\".join([str(x) for x in test_user])+\"');\")", "def test_create_multiple_users(self):\r\n self._auto_auth()\r\n self._auto_auth()\r\n self.assertEqual(User.objects.all().count(), 2)", "def setup(self, session):\r\n user_types = []\r\n user_types.append(UserType(\"ADMIN\", 2))\r\n user_types.append(UserType(\"GUEST\", 0))\r\n user_types.append(UserType(\"USER\", 1))\r\n\r\n user_type_db = UserType.query.all()\r\n\r\n for user in user_type_db:\r\n for seedUser in user_types:\r\n if (user.name == seedUser.name):\r\n user_types.remove(seedUser)\r\n\r\n\r\n for user_type in user_types:\r\n session.add(user_type)\r\n session.commit()\r\n\r\n user = User(\"Admin\", \"[email protected]\", \"SuperSecret\", None, None, True)\r\n user.user_type_id = UserType.get_type_by_name('ADMIN').id\r\n user.activated = True\r\n session.add(user)\r\n ttoken = str(user.id).zfill(10) + str(uuid.uuid4())\r\n #Check if token is unique\r\n while (UserToken.query.filter_by(token=ttoken).count() != 0) :\r\n ttoken = str(user.id).zfill(10) + str(uuid.uuid4())\r\n\r\n ttoken = UserToken(user_id=user.id,token=ttoken)\r\n ttoken.update = datetime.utcnow() - timedelta(hours=4)\r\n ttoken.created = datetime.utcnow() - timedelta(hours=48)\r\n session.add(ttoken)\r\n self.token_list.append(ttoken)\r\n self.user_list.append(user)\r\n session.commit()\r\n\r\n user = User(\"Test\", \"[email protected]\", \"SuperSecret\", None, None, True)\r\n session.add(user)\r\n self.user_list.append(user)\r\n\r\n ttoken = str(user.id).zfill(10) + str(uuid.uuid4())\r\n #Check if token is unique\r\n while (UserToken.query.filter_by(token=ttoken).count() != 0) :\r\n ttoken = str(user.id).zfill(10) + str(uuid.uuid4())\r\n\r\n ttoken = UserToken(user_id=user.id,token=ttoken)\r\n ttoken.update = datetime.utcnow()\r\n ttoken.created = datetime.utcnow()\r\n session.add(ttoken)\r\n self.token_list.append(ttoken)\r\n session.commit()\r\n\r\n yield\r\n session.query(User).delete()\r\n session.query(UserType).delete()\r\n session.query(UserToken).delete()\r\n session.commit()\r\n self.user_list = []\r\n self.token_list = []", "def tearDown(self):\n User.users_list = []", "def populate_db():\n try:\n users = [\n User(name=u'admin', role=1),\n ]\n db.session.add_all(users)\n db.session.commit()\n except:\n db.session.rollback()\n raise Exception(\"Failed to populate the database\")\n finally:\n db.session.close()", "def setUp(self):\n a, b, c = (\n User.objects.create_user(guy, email=\"%[email protected]\" % guy, password=guy)\n for guy in \"abc\"\n )\n 
a.is_superuser = True\n a.save()", "def create_users(\n self, count=1, password=\"Please bypass hashing!\", activation=False\n ):\n users = []\n for index in range(1, count + 1):\n user = User(\n username=\"sagan{}\".format(index),\n email=\"carlsagan{}@nasa.gov\".format(index),\n password=password,\n registered_date=datetime(2000, 1, 1),\n last_login_date=datetime(2000, 1, 1),\n )\n if activation:\n user.activation = Activation()\n users.append(user)\n if hasattr(self, \"repo\"):\n self.repo.add(user)\n if count == 1:\n return users[0]\n else:\n return users", "def create_fake_data():\n User.create_fake_users()", "def setUp(self):\n with connection.cursor() as cursor:\n # Add some test users\n sql = 'INSERT INTO `'+ USER_TABLENAME +'` (`name`, `surname`, `password_hashed`, `e_mail`, `token`, `about_me`, `job_uuid`, `field_of_study`, `forget_password_ans`) VALUES'\n for test_user in SearchTest.user_list:\n cursor.execute(sql + \"('\"+\"','\".join([str(x) for x in test_user])+\"');\")", "def setUpTestUsers(self) -> None:\n self.password = \"thisisasecret\"\n self.other = get_user_model().objects.create_user(\"other\", password=self.password)\n self.user = get_user_model().objects.create_user(\"user\", password=self.password)\n self.admin = get_user_model().objects.create_superuser(\"admin\", password=self.password)\n self.anonymous = AnonymousUser()", "def user_list():\n for values in USERS:\n user = User.objects.create_user(\n values[\"username\"], values[\"email\"], values[\"password\"]\n )\n user.first_name = values[\"first_name\"]\n user.last_name = values[\"last_name\"]\n user.is_staff = values[\"staff\"]\n user.is_superuser = values[\"super\"]\n user.save()\n Token.objects.create(key=values[\"token\"], user_id=user.id)\n\n # print('users created')", "def setUp(self):\n\n self.user_1 = User.objects.create_user(\n first_name=\"John\",\n last_name=\"Kenedy\",\n username=\"johnny\",\n password=\"Phrase908\",\n email=\"[email protected]\",\n )\n self.user_2 = User.objects.create_user(\n first_name=\"Kent\",\n last_name=\"Philip\",\n username=\"kenty\",\n password=\"Phrase908\",\n email=\"[email protected]\",\n )", "def load_users():\n\n print('load_users')\n\n for row in open(\"seed_data/users.csv\"):\n row = row.rstrip()\n\n email, \\\n postal_code, \\\n fname, \\\n lname, \\\n username, \\\n password, \\\n phone, \\\n role = row.split(',')\n\n\n usr = User(email=email,\n postal_code=postal_code,\n fname=fname,\n lname=lname,\n username=username,\n password=password,\n phone=phone,\n role=role)\n\n db.session.add(usr)\n\n db.session.commit()", "def init():\n create_user(app)\n get_all_user()", "def create_fake_users(count, no_echo):\n users = User.create_fake_users(count=count)\n if not no_echo:\n for user in users:\n print(f'{user[0]}: {user[1]}')", "def load_user():\n\n for i, row in enumerate(open(\"seed_data/role.user\")):\n row = row.rstrip()\n name, description = row.split(\"|\")\n role = RoleModel(name=name, description=description)\n db.session.add(role)\n\n for i, row in enumerate(open(\"seed_data/user.user\")):\n row = row.rstrip()\n name, phone, email, password, confirmed_at, role_id = row.split(\"|\")\n user = UserModel(name=name,\n phone=phone,\n email=email,\n password=password,\n confirmed_at=confirmed_at,\n role_id=role_id)\n db.session.add(user)\n\n # for i, row in enumerate(open(\"seed_data/order.user\")):\n # row = row.rstrip()\n # active, user_id, product_location_id = row.split(\"|\")\n # order = OrderrModel(\n # active=active, \n # user_id=user_id, \n # 
product_location_id=product_location_id)\n # db.session.add(order)\n\n db.session.commit()", "def _create_random_user(self,startname=\"\",site=None):\n \n username = startname + \"\".join([choice('AEOUY')+\n choice('QWRTPSDFGHHKLMNB')\n for x in range(3)])\n \n data = {'username':username,\n 'email':username+\"@test.com\"}\n \n return self._create_user(data,site)", "def tearDown(self):\n User.user_list = []", "def setUp(self):\n\n User.query.delete()\n\n user = User.register(**TEST_USER_DATA)\n\n db.session.add_all([user])\n db.session.commit()\n\n self.user_id = user.id", "def generate_users(config: Config):\n users_by_id = {}\n users_by_alternative_id = {}\n for user_data in config.users:\n alternative_id = secrets.token_hex()\n user = User(user_data[\"user_id\"], user_data[\"password_hash\"], alternative_id)\n users_by_id[user.id] = user\n users_by_alternative_id[user.alternative_id] = user\n return users_by_id, users_by_alternative_id", "def random_user():\n\tcount = User.objects.count()\n\treturn User.objects.limit(-1).skip(randint(0,count-1)).next()", "def test_0000_initiate_users( self ):\n self.login( email=common.test_user_1_email, username=common.test_user_1_name )\n test_user_1 = self.test_db_util.get_user( common.test_user_1_email )\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role( test_user_1 )\n self.login( email=common.admin_email, username=common.admin_username )\n admin_user = self.test_db_util.get_user( common.admin_email )\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role( admin_user )", "def populate_user_data():\n try:\n db = mongo_client.MongoClient(config.MONGO_URI).twitter\n db.user.insert_one(\n {\n 'username': 'admin',\n 'password': 'admin',\n }\n )\n print(\"Created an admin account\")\n except Exception as e:\n print(e)", "def test_expected_users(self):\n print()\n print(\"Testing users whose parameters are safe...\")\n for user_id in self.get_unique_ids(100):\n self.store_expected_user(user_id)\n \n User.objects.all().delete()\n print(\"Testing many users whose parameters are safe with bulk_create...\")\n self.store_many_expected_users()\n\n print(\"-\" * 10)", "def users(db):\n users = [UserFactory(), UserFactory()]\n db.session.commit()\n return users", "def seed():", "def setUp(self):\n User.objects.create(email=\"[email protected]\", first_name=\"Test1\", last_name=\"User\")\n User.objects.create(email=\"[email protected]\", first_name=\"Test2\", last_name=\"User\")", "def handle(self, *args, **kwargs):\n seeder = Seed.seeder()\n seeder.add_entity(User, 20)\n\n seeder.add_entity(EmployeeMptt, 20, {\n 'user': lambda x: User.objects.filter(employeemptt=None).first(),\n 'parent': lambda x: EmployeeMptt.objects.order_by(\"?\").first(),\n 'level': lambda x: random.randint(0, 4),\n })\n seeder.execute()", "def sample_user_fifth(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name5\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def setUp(self):\n self.user_count = CustomUser.objects.count()\n self.new_student_user = CustomUser.objects.create(**self.Student)\n self.new_instructor_user = CustomUser.objects.create(**self.Instructor)\n self.new_student_user.set_password(\"student12345\")\n self.new_student_user.save()\n self.new_instructor_user.set_password(\"instructor12345\")\n 
self.new_instructor_user.save()", "def generate():\n db.connection.drop_database(app.config['MONGODB_DB'])\n\n for _ in range(100):\n generate_user()\n\n for _ in range(10):\n generate_api_user()\n\n return json_ok()", "def get_user(number):\n for i in range(number):\n person = Person(\"ru\")\n user = HabrUser(\n username=person.username(template=\"U_d\"),\n email=person.email(domains=(\"yandex.ru\", \"gmail.com\")),\n password=person.password(),\n )\n user.save()", "def test_more_profiles(self):\n\n for x in range(0, 10):\n User.objects.create_user(\n username=\"\".join((\"koalabear\", str(x))),\n email=\"\".join((\"[email protected]\", str(x))),\n password=\"\".join((\"secret\", str(x)))\n )\n\n c = Client()\n response = c.get(reverse('profiles:index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 10)", "def load_sundaes():\n\n print('load_sundaes')\n\n User.query.delete()\n\n for row in open(\"seed_data/sundaes.csv\"):\n row = row.rstrip()\n email, postal_code = row.split(',')\n\n\n usr = User(email=email,\n postal_code=postal_code)\n\n db.session.add(usr)\n\n db.session.commit()", "def users_create():", "def seed_db():\n\n # TODO: Create user\n userNormal = User(username = 'usernormal', password = 'usernormal', role = 1)\n userAdmin = User(username = 'useradmin', password = 'useradmin', role = 10)\n User.insert(userNormal)\n User.insert(userAdmin)\n print('Seed User')\n\n # read accounts.json\n with open('./data/accounts.json') as f:\n accounts_data = json.load(f)\n mongo.db.accounts.insert_many(accounts_data)\n print('Seed Accounts')", "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n test_user_1 = self.test_db_util.get_user(common.test_user_1_email)\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role(test_user_1)\n self.login(email=common.admin_email, username=common.admin_username)\n admin_user = self.test_db_util.get_user(common.admin_email)\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role(admin_user)", "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n test_user_1 = self.test_db_util.get_user(common.test_user_1_email)\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role(test_user_1)\n self.login(email=common.admin_email, username=common.admin_username)\n admin_user = self.test_db_util.get_user(common.admin_email)\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role(admin_user)", "def seed_user(filename):\n linecount = 0\n f = open(filename)\n for row in f:\n type(row)\n row = row.rstrip()\n row = row.split(\"|\")\n print linecount\n if len(row) >= 3:\n if not User.query.filter_by(email=row[1]).first():\n user = User(name=row[0], email=row[1], password=row[2])\n try:\n db.session.add(user)\n db.session.commit()\n except:\n db.session.rollback()\n print user.name, \"has insufficient information, skipping.\"\n linecount += 1\n print \"added user\", linecount\n else:\n print \"user already exists\"\n\n print \"Complete.\", linecount, \"users added\"\n f.close()", "def setUp(self):\n db.create_all()\n user = 
User(\"eschoppik\", \"secret\", \"Elie S\", \"[email protected]\")\n db.session.add(user)\n db.session.commit()", "def example_data_users():\n\n #Empty out existing data\n User.query.delete()\n\n celia = User(user_id=2, first_name='Celia', last_name='Waggoner',\n email=\"[email protected]\", password=\"123\", city=\"San Francisco\",\n state=\"CA\", zipcode='94110',\n amenities_pref=1, cleanliness_pref=5, class_size_pref=10,\n class_schedule_pref=5, class_pace_pref=1)\n pam = User(user_id=3, first_name='Pam', last_name='Geick',\n email=\"[email protected]\", password=\"456\", city=\"Rocky River\",\n state=\"OH\", zipcode='44116',\n amenities_pref=1, cleanliness_pref=1, class_size_pref=1,\n class_schedule_pref=1, class_pace_pref=1)\n amber = User(user_id=4, first_name='Amber', last_name='Lynn',\n email=\"[email protected]\", password=\"789\", city=\"Brooklyn\",\n state=\"NY\", zipcode='11201',\n amenities_pref=10, cleanliness_pref=10, class_size_pref=10,\n class_schedule_pref=10, class_pace_pref=10)\n\n db.session.add_all([celia, pam, amber])\n db.session.commit()", "def reset_all_users():\n for user in User.objects.all():\n user.delete()", "def setup(self, session):\n user_types = []\n user_types.append(UserType(\"ADMIN\", 2))\n user_types.append(UserType(\"GUEST\", 0))\n user_types.append(UserType(\"USER\", 1))\n\n user_type_db = UserType.query.all()\n\n for user in user_type_db:\n for seedUser in user_types:\n if (user.name == seedUser.name):\n user_types.remove(seedUser)\n\n\n for user_type in user_types:\n session.add(user_type)\n session.commit()\n\n user = User(\"GUEST\", \"[email protected]\", \"GUEST\", None, None, True)\n user.user_type_id = UserType.get_type_by_name('GUEST').id\n user.activated = True\n user1 = User(\"USER\", \"[email protected]\", \"USER\", None, None, True)\n user1.user_type_id = UserType.get_type_by_name('USER').id\n user1.activated = True\n user2 = User(\"Admin\", \"[email protected]\", \"SuperSecret\", None, None, True)\n user2.user_type_id = UserType.get_type_by_name('ADMIN').id\n user2.activated = True\n session.add(user)\n session.add(user1)\n session.add(user2)\n session.commit()\n self.userList.append(user)\n self.userList.append(user1)\n self.userList.append(user2)\n\n #make initial project\n session.add(Project.create_project('DummyProject_01'))\n session.add(Project.create_project('DummyProject_02'))\n session.commit()\n\n self.projectList = Project.query.all()\n\n session.add(Module(Project.get_project_by_name('DummyProject_01').id, 'M11'))\n session.add(Module(Project.get_project_by_name('DummyProject_01').id, 'M12'))\n session.add(Module(Project.get_project_by_name('DummyProject_01').id, 'M13'))\n session.add(Module(Project.get_project_by_name('DummyProject_01').id, 'read'))\n session.add(Module(Project.get_project_by_name('DummyProject_01').id, 'write'))\n session.add(Module(Project.get_project_by_name('DummyProject_01').id, 'share'))\n session.add(Module(Project.get_project_by_name('DummyProject_01').id, 'own'))\n session.commit()\n\n self.moduleList = Module.query.all()\n\n perm0 = ModulePerm(self.userList[0].id, self.moduleList[0].id, 15)\n perm1 = ModulePerm(self.userList[0].id, self.moduleList[1].id, 8)\n perm2 = ModulePerm(self.userList[0].id, self.moduleList[2].id, 0)\n\n session.add(perm0)\n session.add(perm1)\n session.add(perm2)\n session.commit()\n\n self.permList.append(perm0)\n self.permList.append(perm1)\n self.permList.append(perm2)\n\n yield\n session.query(ModulePerm).delete()\n self.permList[:] = []\n\n 
session.query(Module).delete()\n session.query(Project).delete()\n session.commit()\n self.moduleList[:] = []\n self.projectList[:] = []\n\n session.query(User).delete()\n session.query(UserType).delete()\n session.commit()\n self.userList[:] = []", "def setUp(self):\n # ensure there is no data in the test database when the test starts\n db.session.commit()\n db.drop_all()\n db.create_all()\n\n # create test admin user\n hashed_pw = bcrypt.generate_password_hash('admin2016')\n admin = Users(first_name=\"admin\", last_name=\"admin\", email=\"[email protected]\", password=hashed_pw)\n\n # create test non-admin user\n hashed_pw_2 = bcrypt.generate_password_hash('test2016')\n employee = Users(first_name=\"test\", last_name=\"user\", email=\"[email protected]\", password=hashed_pw_2)\n\n # save users to database\n db.session.add(admin)\n db.session.add(employee)\n db.session.commit()", "def setUp(self):\n self.new_user = User(\"Juma\",\"12345\")", "def seed_db():\n import cerbereapp.models as models\n con = engine.connect()\n con.execute(models.account_type.insert(), [\n {'Guest'},\n {'Premium'},\n {'Free'}])\n db_session.execute(models.profiles.insert(), [\n {'user_id': 1, 'profile_name' : '1recon'},\n {'user_id': 1, 'profile_name' : '1medic'},\n {'user_id': 2, 'profile_name' : '2recon'},\n {'user_id': 2, 'profile_name' : '2medic'}])\n db_session.commit()", "def add_users():\n try:\n User.objects.get(username='admin').delete()\n except User.DoesNotExist:\n pass\n User.objects.create_superuser(username='admin', password='admin', email='')\n print('> Superuser was created')\n\n try:\n User.objects.get(username='user1').delete()\n except User.DoesNotExist:\n pass\n User.objects.create_user(username='user1', password='user1', email='')\n print('> User (user1) was created')", "def post(self):\n if not util.DEVT:\n abort(404) # very important - dont give users the opportunity to destroy our entire user base\n \n def delete_all():\n ndb.delete_multi(u.User .query().fetch(keys_only=True))\n ndb.delete_multi(u.AuthId.query().fetch(keys_only=True))\n \n def create_admin():\n u.User.create ( username ='admin'\n , email_ ='[email protected]' \n , pwdhash__ =pwd.encrypt('123456')\n , isAdmin_ =True\n , isVerified_=True\n , isActive_ =True\n , authIds =u.randomAuthIds()\n )\n #User.put(admin)\n\n def create_user(n):\n name = 'tutshka%d' % n \n u.User.create ( username =name\n , email_ =name+'@xyz.com'\n , pwdhash__ =pwd.encrypt('123456')\n , isAdmin_ =False\n , isVerified_=random.choice((True, False))\n , isActive_ =random.choice((True, False))\n , bio =random.choice(('All component', 'things are', 'impermanent: work', 'out your', 'own salvation', 'with diligence.'))\n , authIds =u.randomAuthIds()\n )\n #u.addRandomAuthIds()\n #User.put(usr)\n \n delete_all()\n NumUsers = 15\n for n in xrange(NumUsers):\n create_user(n)\n create_admin()\n return ok()", "def setUp(self):\n super(TestCaseWithUsers, self).setUp()\n\n # Creating users\n self.password = 'password1'\n\n UserData = namedtuple('UserData', 'email first_name last_name')\n\n users_data = [\n UserData('[email protected]', 'Some', 'User'),\n UserData('[email protected]', 'Some', 'Admin'),\n UserData('[email protected]', 'Another', 'User'),\n UserData('[email protected]', 'Another', 'Admin'),\n ]\n\n for idx, user_data in enumerate(users_data, start=1):\n attr_name = 'user{}'.format(idx)\n\n self.__setattr__(attr_name, User.objects.create_user(\n first_name=user_data.first_name,\n last_name=user_data.last_name,\n email=user_data.email,\n 
password=self.password,\n ))", "def seed():\n app_config = current_app.config\n\n if User.find_by_identity(app_config['SEED_ADMIN_EMAIL']) is not None:\n return None\n\n params = {\n 'role': 'admin',\n 'email': app_config['SEED_ADMIN_EMAIL'],\n 'password': app_config['SEED_ADMIN_PASSWORD']\n }\n\n return User(**params).save()", "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n self.login(email=common.admin_email, username=common.admin_username)\n self.galaxy_login(email=common.admin_email, username=common.admin_username)", "def seed_db(db):\n permission = Permission('manage_users')\n permissions.description = 'Can manage users'\n db.session.add(permission)\n\n user = User('Admin', 'User')\n user.email = '[email protected]'\n user.password = User.hash_password('Password123')\n db.session.add(user)\n\n db.session.commit()\n\n permission = Permission.query.filter_by(name='manage_users').first()\n user = User.query.filter_by(email='[email protected]').first()\n\n db.session.add(UserPermission(user.id, permission.id))\n db.session.commit()", "def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))", "async def newusers(self, ctx, *, count=5):\n\n count = min(max(count, 5), 25)\n\n now = datetime.now(timezone.utc)\n e = disnake.Embed()\n\n for idx, member in enumerate(\n sorted(ctx.guild.members, key=lambda m: m.joined_at, reverse=True)\n ):\n if idx >= count:\n break\n\n value = \"Joined {0} ago\\nCreated {1} ago\".format(\n pretty_timedelta(now - member.joined_at),\n pretty_timedelta(now - member.created_at),\n )\n e.add_field(name=po(member), value=value, inline=False)\n\n await ctx.send(embed=e)", "def init_default_users():\n from flask import current_app as app\n with app.app_context():\n notion_uname = app.config.get(\"NOTION_CRONJOB_USERNAME\")\n notion_passwd = app.config.get(\"NOTION_CRONJOB_PASSWORD\")\n\n if notion_uname and notion_passwd:\n try:\n User.createOne(\n username=notion_uname,\n password=notion_passwd\n )\n except NotUniqueError:\n app.logger.info(\"Notion Job User already exists!\")\n except Exception as err:\n app.logger.error(\"Notion Job User was not created!\", err)\n else:\n app.logger.info(\"Created Notion Job User Successfully!\")", "def seedUser(user,scrape=False):\n print '*** SEEDING: '+user+' ***'\n if scrape:\n chain(getTwitterUsers.s([user]),getTwitterConnections.si(user), getTwitterConnections.si(user,friends=False), \n getTweets.si(user,maxTweets=1000), startUserScrape.si(user))()\n else:\n chain(getTwitterUsers.s([user]),getTwitterConnections.si(user), getTwitterConnections.si(user,friends=False),\n getTweets.si(user,maxTweets=1000))()", "def setUp(self):\n\n User.query.delete()\n\n user = User.register(**TEST_USER_DATA)\n db.session.add(user)\n\n db.session.commit()\n\n self.user_id = user.id", "def create_users(self):\n allow_all_policy = \"\"\"{\n \"Statement\": [\n {\n \"Action\": \"*\",\n \"Effect\": \"Allow\",\n \"Resource\": \"*\"\n }]\n }\"\"\"\n\n for i in xrange(self.args.account_number):\n account_name = self.args.account_prefix + str(i)\n group_name = self.args.group_prefix + str(i)\n password = self.args.password_prefix + str(i)\n self.tester.iam.create_account(account_name)\n self.tester.iam.create_group(group_name, \"/\", account_name)\n self.tester.iam.attach_policy_group(group_name, \"allow-all\", allow_all_policy, account_name)\n for k in xrange(self.args.user_number):\n user_name = self.args.user_prefix + str(k)\n 
self.tester.iam.create_user(user_name, \"/\", account_name)\n self.tester.iam.add_user_to_group(group_name, user_name, account_name)\n self.tester.iam.create_login_profile(user_name, password, account_name)", "def setUpTestData(cls):\n countries = [\"MX\", \"CHL\", \"USA\", \"PER\", \"COL\"]\n slack_user_ids = [\"UP0918MAV\", \"UP0918MAV\", \"UP0918MAV\", None, None]\n cls.menu = Menu.objects.create(available_on=date.today())\n for count in range(5):\n user = User.objects.create(username=f\"johny.doe {count}\")\n Employee.objects.create(\n user=user, country=countries[count], slack_user_id=slack_user_ids[count]\n )", "def setUp(self):\n self.new_user = User('JosphatOtieno','jose@otis45')", "def create_base_users(): # TODO: Just call create_user for each\n with engine.connect() as connection:\n\n result = connection.execute(\"select user from pdp_users\")\n user_count = len(result.fetchall())\n if user_count == 0:\n\n print(\"Creating base users\")\n\n pu = sa.Table(\"pdp_users\", metadata, autoload=True, autoload_with=engine)\n\n # user\n pw_hash = user_api.hash_password(BASEUSER_PW)\n ins_stmt = pu.insert().values(\n username=\"base_user\", full_name=\"Base User\", password=pw_hash, active=\"Y\", role=1,\n )\n connection.execute(ins_stmt)\n\n # INactive user\n # Reuse pw hash\n ins_stmt = pu.insert().values(\n username=\"base_user_inact\", full_name=\"Inactive User\", password=pw_hash, active=\"N\", role=1,\n )\n connection.execute(ins_stmt)\n\n # editor\n pw_hash = user_api.hash_password(BASEEDITOR_PW)\n ins_stmt = pu.insert().values(\n username=\"base_editor\", full_name=\"Base Editor\", password=pw_hash, active=\"Y\", role=2,\n )\n connection.execute(ins_stmt)\n\n # admin\n pw_hash = user_api.hash_password(BASEADMIN_PW)\n ins_stmt = pu.insert().values(\n username=\"base_admin\", full_name=\"Base Admin\", password=pw_hash, active=\"Y\", role=9,\n )\n connection.execute(ins_stmt)\n\n else:\n print(user_count, \"users already present in DB, not creating\")", "def setUp(self):\n self.user1 = User.objects.create_user(username='jack', email='[email protected]', password='secret')\n self.user1.first_name = \"Jack\"\n self.user1.last_name = \"Smith\"\n self.user1.save()", "def setUp(self):\n\n User.query.delete()\n\n user = User.register(**TEST_USER_DATA)\n db.session.add(user)\n\n db.session.commit()\n\n self.user = user", "def users(db):\n db.session.query(User).delete()\n\n users = [\n {\n 'role': 'admin',\n 'email': '[email protected]',\n 'password': 'password'\n },\n {\n 'active': False,\n 'email': '[email protected]',\n 'password': 'password'\n }\n ]\n\n for user in users:\n db.session.add(User(**user))\n\n db.session.commit()\n\n return db", "def update_all_users():\n\tfor user in User.query.all():\n\t\tadd_or_update_user(user.name)", "def setUp(self):\n\n self.user = self.client.users.create({})", "def _initialize_users():\n if not USER_ACCOUNTS_PATH.exists():\n raise FileNotFoundError()\n\n with open(str(USER_ACCOUNTS_PATH)) as f:\n user_accounts = json.load(f)\n\n conn, c = _get_db_connection()\n\n c.execute('''DELETE FROM user''')\n\n for user in user_accounts['accounts']:\n c.execute(\"\"\"INSERT INTO user VALUES (?, ?)\"\"\", (user['email'], user['password']))\n\n conn.commit()", "def setUp(self):\r\n\r\n\r\n db.drop_all()\r\n db.create_all()\r\n\r\n u1 = User.signup(\"test1\", \"[email protected]\", \"password\", None)\r\n uid1 = 1111\r\n u1.id = uid1\r\n\r\n u2 = User.signup(\"test2\", \"[email protected]\", \"password\", None)\r\n uid2 = 2222\r\n u2.id = uid2\r\n\r\n 
db.session.commit()\r\n\r\n u1 = User.query.get(uid1)\r\n u2 = User.query.get(uid2)\r\n\r\n self.u1 = u1\r\n self.uid1 = uid1\r\n\r\n self.u2 = u2\r\n self.uid2 = uid2\r\n\r\n self.client = app.test_client()", "def setUp(self):\n self.new_user = User(\"Hamisi\",\"python\")" ]
[ "0.76051784", "0.73858875", "0.7105484", "0.7039456", "0.7026748", "0.70025635", "0.69474685", "0.69027597", "0.68793637", "0.6835339", "0.6813174", "0.6786388", "0.6739348", "0.6732933", "0.6717712", "0.66460043", "0.66320664", "0.6586459", "0.6585731", "0.65754217", "0.65524065", "0.65043545", "0.64892256", "0.6479792", "0.6473599", "0.64275825", "0.6415466", "0.641006", "0.6395737", "0.6383656", "0.63617295", "0.62927675", "0.62888634", "0.62867415", "0.6284497", "0.62616533", "0.6257704", "0.6216904", "0.62154406", "0.6210309", "0.6192479", "0.61877626", "0.61752313", "0.61671114", "0.613889", "0.6108405", "0.60888875", "0.60795534", "0.6073442", "0.60583377", "0.6053875", "0.60536605", "0.6046003", "0.60349035", "0.60290384", "0.6026848", "0.6019959", "0.60176706", "0.60161257", "0.60139453", "0.6011898", "0.59899765", "0.59823716", "0.59747535", "0.5973712", "0.5971398", "0.5967407", "0.59630734", "0.59630734", "0.59400123", "0.59306234", "0.59225005", "0.5919936", "0.5918584", "0.5918094", "0.5909467", "0.5906846", "0.590292", "0.5899249", "0.5888358", "0.5884865", "0.5880964", "0.5880835", "0.5875104", "0.587491", "0.58607197", "0.5850018", "0.5834765", "0.582412", "0.58173984", "0.5815876", "0.58145916", "0.5812663", "0.58099055", "0.5806892", "0.580273", "0.580143", "0.57940614", "0.5791534", "0.5790845" ]
0.7705716
0
__init__ initializes an instance of the BlackBoxGame class
def __init__(self, atoms):
    self._board = Board.Board(atoms)
    self._score = 25
    self._atoms = self._board.get_atoms()
    self._guesses = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self._game_state = \"UNFINISHED\"\n self._current_player = \"BLACK\"\n self._game_board = Board()", "def __init__(self):\r\n self._board = None\r\n self._bb_settings = Settings()\r\n self._screen = pygame.display.set_mode((self._bb_settings.screen_width,\r\n self._bb_settings.screen_height))\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)\r\n self._image = pygame.image.load('board.bmp')\r\n self._rect = self._image.get_rect()\r\n self._play_mode_button_list = self.make_play_mode_buttons()\r\n self._replay_button_list = self.make_replay_buttons()", "def __init__(self, width, height):\n Game.__init__(self, width, height)", "def __init__(self, screen, game_state=None):\n pass", "def __init__(self):\n\n self.width = 10\n self.height = 10\n self.new_game()", "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "def __init__(self):\n\n self.frameCount = 0\n self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "def __init__(self):\n # Current player\n self.player = X\n\n # Board\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]\n\n # Winner\n self.winner = None\n\n # Game over\n self._gameover = False", "def __init__(self):\n\n self.__turn_info = { 'turn': ChessGame.WHITE }\n self.init_board()", "def __init__(self):\n self.gameloop()", "def __init__(self):\n\n self._board = Board()", "def __init__(self):\r\n # Screen settings\r\n self.screen_width = 990\r\n self.screen_height = 990\r\n self.bg_color = (115, 204, 0)\r\n self.player_speed = 30\r\n self.enemy_speed = 45\r\n self.bomb_width = 90\r\n self.bomb_height = 90\r\n self.bomb_color = (96,96,96)\r\n self.max_bombs = 1\r\n self.bomb_radius = 45\r\n self.color_1 = (200, 200, 200)\r\n self.color_2 = (0, 0, 0)\r\n self.row_width = self.screen_width / 11\r\n self.col_width = self.screen_width / 11\r\n\r\n self.red_points = 0\r\n self.blue_points = 0\r\n\r\n self.wall_types = {\r\n 'wall': 1,\r\n 'barell': 2,\r\n }", "def __init__(self):\n self.game_screen = pygame.display.set_mode((GameData.screen_dim, GameData.screen_dim))\n self.game_screen.fill(GameData.background_color)\n self.player = 1\n self.game_over = False\n self.board = np.zeros((GameData.rows, GameData.columns))", "def __init__(self):\n\n # Screen's settings\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230, 230, 230)\n\n # Bluebee Settings\n self.bb_speed = 1.0\n\n # Moving test.\n self.counter = 0\n self.max_left = 400\n self.max_up = 300\n self.max_right = 400\n self.max_down = 300", "def __init__(self):\n\t\tpygame.init()\n\t\tself.settings = Settings()\n\n\t\tself.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n\t\tself.settings.screen_width = self.screen.get_rect().width \n\t\tself.settings.screen_height = self.screen.get_rect().height\n\t\tpygame.display.set_caption(\"Pigeon Drop!\")\n\n\t\t# Create an instance to store game statistics,\n\t\t# and create a scoreboard.\n\t\tself.stats = GameStats(self)\n\t\tself.sb = Scoreboard(self)\n\n\t\tself.pigeon = Pigeon(self)\n\t\tself.droppings = pygame.sprite.Group()\n\t\tself.autos = pygame.sprite.Group()\n\n\t\tself._create_fleet()\n\n\t\t# Make the Play button.\n\t\tself.play_button = Button(self, \"Play\")", "def __init__(self):\n pygame.init()\n\n self.settings = 
Settings()\n\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height))\n pygame.display.set_caption(\"52 Card Trick\")\n self.CardSet = CardSet(self)", "def __init__(self):\n self.__grid = create_grid(\n Settings.SIZE_X, Settings.SIZE_Y, MarkerType.NONE)\n\n self.__turn = 0\n self.__state = GameState.PLAYING\n self.__winner = MarkerType.NONE\n self.__loser = MarkerType.NONE\n\n # Separate counter for turns, because __turn depends on starting player\n self.__turns_played = 0", "def __init__(self):\r\n\t\tself.game_board = [['0','0','0'],['0','0','0'],['0','0','0']]\r\n\t\tself.count = 0\r\n\t\tself.x_turn = True\r\n\t\r\n\r\n\t\tpass", "def __init__ (self, game):\r\n\r\n pygame.sprite.Sprite.__init__(self)\r\n self.game_ref = game\r\n self.initialise()", "def setup_new_game(self):\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)", "def __init__(self):\n self.board = Board()\n self.__root = BinaryNode(Board(), None, Board.PLAYER_1)\n self.player = Board.PLAYER_0\n self.win = False", "def __init__(self):\n # The starting counts are set to 0 and modified when the board is initiated.\n self.num_black_pieces = 0\n self.num_black_kings = 0\n self.num_white_pieces = 0\n self.num_white_kings = 0\n # Creates a new board and fills it with the appropriate pieces.\n self.board = self._initiate_board()\n self.moves = []", "def init(self):\n self._last = None\n self._game = None\n self._state = STATE_INACTIVE\n self._message = GLabel(text='Breakout\\n\\nClick To Begin\\n\\nGood Luck',\n font_size=24,x=GAME_WIDTH / 2.0, y=GAME_HEIGHT*(2.0/3.0),\n halign='center', valign='middle', linecolor=colormodel.WHITE)\n self._touch = None\n self._countdownTime = 0\n self._countdownMessage = GLabel(text='3', font_size=40,x=GAME_WIDTH / 2.0,\n y=GAME_HEIGHT*(2.0/3.0), halign='center',\n valign='middle', linecolor=colormodel.WHITE)\n self._pausedMessage = GLabel()\n self._sound = True\n self._soundImage = GImage(x=GAME_WIDTH-32, y=0, width=32, height=22,\n source='whitevolumeon.png')\n self._background = GRectangle(x=0, y=0, width=GAME_WIDTH, height=GAME_HEIGHT,\n fillcolor=colormodel.BLACK, linecolor=colormodel.BLACK)", "def __init__(self, turn, game):\n\t\tself.turn = turn\n\t\tself.game = game\n\t\tself.gameBoard = game.returnBoard()", "def __init__(self):\n pygame.init()\n # Assign surface i.e where game elements can be displayed\n self.settings = Settings()\n # Enable Full screen mode\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.settings.screen_width = self.screen.get_rect().width\n self.settings.screen_height = self.screen.get_rect().height\n pygame.display.set_caption(\"Oyugo's Space Invasion\")\n self.bg_colour = (self.settings.bg_colour)\n self.stats = GameStats(self)\n self.sb = Scoreboard(self)\n self.ship = Ship(self)\n self.bullets = pygame.sprite.Group()\n self.special_bullet = pygame.sprite.Group()\n self.bomb_status = False\n self.stars = pygame.sprite.Group()\n self._create_galaxy()\n self.play_button = Button(self, \"Play\")", "def init_game(self):\n self.blind_manager = BlindManager(hands_per_level=10,\n bots=self.living_bot_names())", "def __init__(self, width, height):\n super().__init__(width, height)\n\n self.ball = Ball()\n self.paddle = Paddle()\n self.score = 0\n\n # These are used to see if the user is\n # holding down the arrow keys\n self.holding_left = False\n self.holding_right = False\n\n 
arcade.set_background_color(arcade.color.WHITE)", "def __init__(self, player):\n \n self.colour = player\n self.game_in_head = Board()", "def __init__(self, game, player):\n self.game = game\n self.player = player", "def __init__(self, ik_game):\r\n self.ik_game = ik_game\r\n self.screen = ik_game.screen\r\n self.screen_rect = self.screen.get_rect()\r\n self.settings = ik_game.configuracoes\r\n self.stats = ik_game.stats\r\n\r\n # Font para dispositive os dados\r\n self.text_color = (250, 250, 250)\r\n self.font = pygame.font.SysFont(None, 48)\r\n\r\n # Inicia o placar inicial\r\n self.prep_placar()\r\n self.prep_placar_score()\r\n self.prep_level()\r\n self.prep_naves()", "def __init__(self):\n self.board = [\n BS, BS, BS, BS,\n BS, BS, BS,\n BS, BS, BS, BS,\n EM, EM, EM,\n WS, WS, WS, WS,\n WS, WS, WS,\n WS, WS, WS, WS\n ]\n self.curr_player = WHITE_PLAYER", "def __init__(self):\n pygame.init()\n pygame.display.set_caption(TITLE)\n self.screen = pygame.display.set_mode(WIN_SIZE)\n\n self.clock = pygame.time.Clock()\n self.score_pos = CENTER_W, BLOCK_H // 2\n\n self.white_bar = pygame.Surface((WIN_W, BLOCK_H))\n self.white_bar.fill((255, 255, 255))\n self.white_bar = self.white_bar.convert()\n self.block = pygame.Surface(BLOCK_SIZE)\n self.block.fill(Color.RED)\n self.red_block = self.block.convert()\n self.block.fill(Color.GREEN)\n self.green_block = self.block.convert()", "def __init__(self, ai_settings, screen, goku):\r\n super(Kame, self).__init__()\r\n self.screen = screen\r\n\r\n # Create kame rect at (0, 0), then set correct position.\r\n self.image = pygame.image.load('kame.bmp')\r\n self.rect = self.image.get_rect()\r\n self.rect.centerx = goku.rect.centerx\r\n self.rect.top = goku.rect.top\r\n \r\n # Store a decimal value for the kame's position.\r\n self.y = float(self.rect.y)\r\n\r\n self.color = ai_settings.kame_color\r\n self.speed_factor = ai_settings.kame_speed_factor", "def initializeGame(self):\n # Fill deck with cards and shuffle it\n self.deck.fill(104)\n self.deck.shuffle()\n #print \"Deck initialized\"\n\n # Initialize the field\n self.field.initialize(self.deck.draw(4))\n self.field.sortField()\n #self.field.printField()\n\n # Set players to initial state again\n # Distribute cards and set bulls to 0\n for p in self.players:\n p.bulls = 0\n p.setHand(self.deck.draw(10))", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.done = False\n # ship = random.choice(list(prepare.GFX[\"ships\"].values()))\n ship = list(prepare.GFX[\"ships\"].values())[7] # pick first ship available\n self.player = actors.Player((0, 0), ship)\n self.level = level.Level(self.screen_rect.copy(), self.player)\n\n self.energyloss_counter = 0\n self.energygain_counter = 0", "def init_game(self):\n nrows = len(self.array)\n self.game_over = False\n self.squares_left = nrows * nrows\n self.bombs_left = 0\n # clear the board\n for i in xrange(nrows):\n for j in xrange(nrows):\n self.array[i][j].reset()\n # put N random bombs\n for i in xrange(nrows):\n rand_num = random.randrange(nrows*nrows)\n if self.array[rand_num / nrows][rand_num % nrows].type \\\n != SquareType.BOMB:\n self.insert_bomb(rand_num / nrows, rand_num % nrows)\n self.squares_left -= self.bombs_left\n self.print_board()", "def __init__(self, game):\n self.game = game\n ConsoleController.__init__(self, None)", "def __init__(self):\n\t\t# Screen size settings\n\t\t# Note that these values are 
commented out because we're using\n\t\t# full screen mode.\n\t\t#self.screen_width = 1200\n\t\t#self.screen_height = 600\n\n\t\t# Color definitions and background/color setting\n\t\tmidnight_blue = (0, 3, 36)\n\t\tblack = (0, 0, 0)\n\t\twhite = (255, 255, 255)\n\t\tself.bg_color = midnight_blue\n\n\t\tself.bg_image = pygame.image.load('images/space_bg.jpg')\n\n\t\t# Rocket settings\n\t\tself.max_speed = 3\n\t\tself.acceleration = 0.01\n\t\tself.rotation_speed = 3\n\t\t# Starts facing upwards\n\t\tself.rotation_angle = 271\n\n\t\t# Bullet settings\n\t\tself.bullet_speed = 8\n\t\tself.bullet_width = 3\n\t\tself.bullet_height = 15\n\t\tself.bullet_color = (60, 60, 60)\n\t\tself.bullets_allowed = 3", "def initialize(self):\n result = pygame.init()\n pygame.font.init()\n pygame.display.set_caption('gomoku TDD')\n self.screen = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n self.clock = pygame.time.Clock()\n self.smallfont = pygame.font.Font(None, 40)\n self.isinitialized = True", "def init(cls):\n\n cls.configs = yaml.load( file('../local/config.yaml') )\n cls.is_online = False\n cls.state = State.playing\n cls.classes = classes\n cls.guiclasses = guiclasses\n\n # set up pygame and init\n pygame.init()\n\n # Set up the window\n cls.screen = pygame.display.set_mode(\n tuple(cls.configs['options']['resolution']),\n 0,\n 32)\n classes.screen = cls.screen\n guiclasses.screen = cls.screen", "def __init__(self):\n self._pygameInstance = _pygameInstance()\n self.resetHandlers()", "def __init__(self):\n\t\t# Screen setting.\n\t\tself.screen_width = 1200\n\t\tself.screen_height = 800\n\t\tself.bg_color = (230, 230, 230)\t\n\n\t\t#Ship setting\n\t\tself.ship_speed_factor = 10\t\t\n\t\tself.ship_limit = 3\t\t\t# number ship \n\n\t\t# Bullet setting.\n\t\tself.bullet_speed_factor = 3\n\t\tself.bullet_width = 3\n\t\tself.bullet_height = 15\n\t\tself.bullet_color = (60,60,60) #dark gray bullet\n\t\tself.bullets_allowed = 6\t\t# number bullet in screen\n\n\t\t#Alien setting.\n\t\tself.alien_speed_factor = 3\n\t\tself.fleet_drop_speed = 50\n\t\t# fleet_direction of 1 represents right; -1 represents left. 
\n\t\tself.fleet_direction = 1\n\n\t\t# Scoring\n\t\tself.alien_points = 50\n\n\t\t# How quickly the game speed ups\n\t\tself.speedup_scale = 1.1\n\t\tself.iniitialize_dynamic_settings()\n\t\t# How quickly score increase.\n\t\tself.score_scale = 1.5", "def __init__(self):\n pygame.init()\n self.settings = Settings()\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height)\n )\n pygame.display.set_caption(\"Sideways Shooter\")\n self.stats = GameStats(self)\n self.sideways_ship = SidewaysShip(self)\n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group()\n self._create_fleet()", "def __init__(self):\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n self.player_count: int = None\n self.player_hand_0: arcade.SpriteList = None\n self.player_hand_1: arcade.SpriteList = None\n self.deck: arcade.SpriteList = None\n self.pile: arcade.SpriteList = None", "def __init__(self, allow_step_back=False):\n self.allow_step_back = allow_step_back\n self.np_random = np.random.RandomState()\n \"\"\" No big/small blind\n # Some configarations of the game\n # These arguments are fixed in Leduc Hold'em Game\n # Raise amount and allowed times\n self.raise_amount = 2\n self.allowed_raise_num = 2\n self.num_players = 2\n \"\"\"\n # Some configarations of the game\n # These arguments can be specified for creating new games\n\n # Small blind and big blind\n self.small_blind = 1\n self.big_blind = 2 * self.small_blind\n\n # Raise amount and allowed times\n self.raise_amount = self.big_blind\n self.allowed_raise_num = 2\n\n self.num_players = 2", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._damage = 0\n self._gold_cost = 0\n self._health = 0\n self._mana_cost = 0\n self._moves = 0\n self._per_tile = 0\n self._range = 0\n self._title = \"\"", "def __init__(self, box):\n self.is_hidden = False\n self.last_boxes = []\n self.best_box = None\n self.frames_undetected = 0\n self.age = 0\n self.n_frames = 10\n\n self.update(box)", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._bottle = None\n self._cowboy = None\n self._furnishing = None\n self._has_hazard = False\n self._is_balcony = False\n self._tile_east = None\n self._tile_north = None\n self._tile_south = None\n self._tile_west = None\n self._x = 0\n self._y = 0\n self._young_gun = None", "def setup_game(self):", "def __init__(self):\n game_engine = get_gameengine()\n if game_engine is not None:\n self = game_engine\n else:\n ## The targeted frames per second\n self.target_fps = 200\n\n ## The start time\n self.time = time.time()\n\n ## A list of all registered game objects\n self.game_objects = list()\n\n ## A list of colliders\n self.colliders = list()\n\n ## Manage the user inputs\n self.input_manager = InputManager(self)\n\n ## Determines the state of the Game Engine\n self.running = False\n\n ## Variable to pause the Game Engine\n self.paused = False\n\n self.time_node = pm.PyNode('time1')\n # end if", "def __init__(self,player):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.c = config.Config()\n\n\t\tself.image = pygame.image.load(self.c.IMAGE_PATH + \"bomb.png\").convert()\n\t\tself.position = self.image.get_rect()\n\t\tself.position = self.position.move((player.position.x,player.position.y))\n\t\tself.range = player.power\n\t\tself.player = player\n\t\tself.triggered = False", "def __init__(self):\n self.colorPalette 
= ColorPalette()\n self.frame_number = 0\n\n # initialization of Pygame components\n pygame.init()\n self.__icon = self.__make_icon()\n self.screen_size = (600, 800) # width x height\n self.canvas = pygame.display.set_mode(self.screen_size, 0, 32)\n pygame.display.set_caption(\"Flappy Bird\")\n pygame.display.set_icon(self.__icon)\n self.clock = pygame.time.Clock()\n\n # Initialization of game models\n self.ground = Ground(self.screen_size)\n self.background = Backdrop(self.ground.offset)\n self.bird = Bird(ground_offset=self.ground.offset, y_coord=self.screen_size[1] // 2)\n self.pipes = [PipeSet()]\n self.menu_pipes = [PipeSet()]\n\n self.main_menu_screen = Menu(canvas_dimensions=self.screen_size)\n\n self.player_points = 0\n self.scoreboard = Scoreboard(canvas_dimensions=self.screen_size)\n self.game_over_screen = GameOverMenu(canvas_dimensions=self.screen_size)\n\n # Game control flow manipulation variables\n self.__play_game = False\n self.__just_launched = True\n self.player_dead = False\n self.scroll_speed = 2", "def __init__(self, battle):\r\n self.messageBox = None\r\n BattleScreen.__init__(self, battle)", "def __init__(self, screen, game_settings):\n\t\tself.screen = screen\n\t\tself.color = game_settings.growth_block_color\n\t\t\n\t\t# Sets rect at origin and size 25 x 25\n\t\tself.rect = pygame.Rect(0, 0, 24, 24)\n\t\t\n\t\t# Uses randint to use as random rect position\n\t\tx = randint(0, 35)\n\t\ty = randint(1, 26)\n\t\tself.rect.x = (25 * x) + 1\n\t\tself.rect.bottom = 25 * y", "def __init__(self):\n \n self._wall = BrickWall() \n self._paddle = GRectangle(\n x=GAME_WIDTH/2 - PADDLE_WIDTH/2,\n y=PADDLE_OFFSET,\n width=PADDLE_WIDTH,\n height=PADDLE_HEIGHT,\n fillcolor = PADDLE_COLOR)\n self._clickdist = 0\n self._ball = Ball() \n self._last = None\n self._tries = 2\n self._lostlife = False", "def __init__(self) -> None:\n self.win = self.__init_window()\n self.BACKGROUND = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"background\", \"background.png\")), (WIDTH, HEIGHT))\n self.highscore = 0\n self.gamemode = Gamemodes.startscreen\n self.clock = pygame.time.Clock()", "def __init__(self, players):\n self.players = players\n self.board = Board()", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60.0\n self.keys = pg.key.get_pressed()\n self.done = False\n self.player = Player((50,875), 4)\n self.level = pg.Surface((1000,1000)).convert()\n self.level_rect = self.level.get_rect()\n self.viewport = self.screen.get_rect(bottom=self.level_rect.bottom)\n self.win_text,self.win_rect = self.make_text()\n self.obstacles = self.make_obstacles()", "def __init__(self, width, height):\n super().__init__(width, height)\n arcade.set_background_color(arcade.color.SMOKY_BLACK)\n\n self.held_keys = set()\n\n \n # TODO: declare anything here you need the game class to track\n self.ship = Ship()\n self.asteroid_array = []\n self.bullets_list = []\n self.create_asteroids()", "def __init__(self):\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (0, 230, 0)\n\n # Glove Settings\n self.glove_move_speed = 0.25\n self.glove_size = 100\n\n # Ball Settings\n self.ball_move_speed = 0.25\n self.ball_size = 40", "def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode((DisplayConsts.SCREEN_WIDTH, DisplayConsts.SCREEN_HEIGHT))", "def __init__(self, ai_game):\r\n\t\tsuper().__init__()\r\n\t\tself.screen = ai_game.screen\r\n\t\tself.settings = 
ai_game.settings\r\n\t\tself.screen_rect = ai_game.screen.get_rect()\r\n\r\n\t\t# Load the ship image and get its rect.\r\n\t\tself.image = pygame.image.load('images/ship_counter.bmp')\r\n\t\tself.rect = self.image.get_rect()", "def __init__(self):\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n\n self.black_checkers = []\n self.white_checkers = []\n\n logger.info(u'Initialized checkerboard {}'.format(self))", "def __init__(self, window: pg.Surface):\n self.window = window\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.maximum_obstacles_on_board = 10\n self.obstacles = self.create_obstacles()", "def __init__(self):\n pygame.init() # intializes background settings\n self.settings = Settings()\n\n # the self.screen obj creates a `surface` that represents game screen where elements can be drawn\n ### run in 1200 x 800 mode\n self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height)) \n \n ### run in fullscreen mode\n # self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n # self.settings.screen_width = self.screen.get_rect().width\n # self.settings.screen_height = self.screen.get_rect().height\n\n pygame.display.set_caption(\"Alien_Invasion\")\n\n # Create instance of game statistics & scoreboard\n self.stats = GameStats(self)\n self.sb = Scoreboard(self)\n\n # the self.ship instance is assigned to give Ship access to all game resourses via self parameter\n self.ship = Ship(self)\n self.bullets = pygame.sprite.Group() # similar to a list with extra features\n\n # create instance of alien\n self.aliens = pygame.sprite.Group()\n self._create_fleet()\n\n # Create a Play button\n self.play_button = Button(self, \"Play !\")", "def __init__(self, game, left, right, bottom, top, col=\"black\"):\n\n # Assign given attributes (ensuring order of coordinates)\n self.game = game\n self.canvas = game.canvas # canvas to draw self on\n self._left = min(left, right)\n self._right = max(left, right)\n self._bottom = min(bottom, top)\n self._top = max(bottom, top)\n self.color = col\n\n # Draw the block\n self._draw()", "def __init__(self):\n self._current_state = \"UNFINISHED\"\n self._start_color = \"RED\"\n self._board = Board()", "def __init__(self):\n pygame.init()\n self.settings = Settings()\n\n #self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n #self.settings.screen_width = self.screen.get_rect().width\n #self.settings.screen_height = self.screen.get_rect().height\n\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height ))\n pygame.display.set_caption(\"Alien Invasion\")\n\n # Create an instance to store game stats.\n self.stats = GameStats(self)\n # Create scoreboard\n self.scoreboard = Scoreboard(self)\n\n # Create objects\n self.ship = Ship(self)\n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group()\n self._create_fleet()\n\n # Make play button\n self.play_button = Button(self, \"Play\")", "def init_new_game(self):\n self.game = get_new_game(self.game_config)", "def __init__(self):\n brick = []\n colour = [colormodel.RED, colormodel.RED, colormodel.ORANGE,\n colormodel.ORANGE,colormodel.YELLOW, colormodel.YELLOW,\n colormodel.GREEN,colormodel.GREEN, colormodel.CYAN,\n colormodel.CYAN]\n \n for vert in range(BRICK_ROWS):\n color = colour[vert]\n \n for hor in range(BRICKS_IN_ROW):\n x= 2 + (BRICK_WIDTH/2) + ((GAME_WIDTH / BRICKS_IN_ROW)*hor) \n y= TOP_Y - ((BRICK_HEIGHT+BRICK_SEP_V)*vert)\n brick.append(self.setBrick(x,y,color))\n 
\n print 'There are '+str(len(brick))+' brick(s) in this game.' \n \n self._bricks = brick\n self._paddle = self.getPaddle()\n self._ball = None\n self._tries = NUMBER_TURNS\n self._score = 0", "def __init__(self, width=7, height=6):\n self.width = width\n self.height = height\n self.board = self.createBoard()", "def __init__(self, players):\n\n # Define the players\n self.players = players\n\n # Define who starts the game\n self.nplayer = 1 \n\n # Define the board\n self.board = [0] * 9", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.joys = initialize_all_gamepads()\n self.done = False\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.cannon = Turret(self.joys[0], (250,250))\n self.objects = pg.sprite.Group()", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._client_type = \"\"\n self._creatures = []\n self._lost = False\n self._name = \"Anonymous\"\n self._opponent = None\n self._reason_lost = \"\"\n self._reason_won = \"\"\n self._time_remaining = 0\n self._total_health = 0\n self._won = False", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._acted = False\n self._dash_x = 0\n self._dash_y = 0\n self._energy = 0\n self._genarium = 0\n self._is_busy = False\n self._job = None\n self._legendarium = 0\n self._moves = 0\n self._mythicite = 0\n self._owner = None\n self._protector = None\n self._rarium = 0\n self._shield = 0\n self._x = 0\n self._y = 0", "def __init__(self):\n # Initialisation de la bibliothèque Pygame\n pg.init()\n\n # Création de l'écran principal de taille 640px x 480px\n self.screen = pg.display.set_mode((settings.WIDTH, settings.HEIGHT))\n\n # Chargement et collage du fond\n self.background = pg.image.load(settings.BACKGROUND).convert()\n self.screen.blit(self.background, (0, 0))\n\n # Création d'une variable indiquant si le jeu est en cours\n self.running = False\n\n # On termine cette méthode d'initialisation par une mise à jour de \n # l'écran principal\n pg.display.update()", "def main():\r\n\r\n pygame.init()\r\n pygame.display.init()\r\n\r\n # Set the pygame clock\r\n clock = pygame.time.Clock()\r\n\r\n pygame.display.set_caption(\"Blackbox game\")\r\n current_game = BlackBoxGame()\r\n clock = pygame.time.Clock()\r\n\r\n while True:\r\n current_game.check_events()\r\n clock.tick(60)\r\n current_game.update_screen()\r\n\r\n pygame.quit()", "def __init__(self, board):\n self.running = True\n self.state = \"waiting\"\n pygame.init()\n pygame.display.set_caption(\"Sudoku Solver\")\n\n self.define_grid()\n self.define_number_positions()\n self.define_button()\n self.board = board\n self.font = pygame.font.Font('ubuntu.ttf', NUMBERS_SIZE)\n self.sleep_time = 1 / CHANGES_PER_SECOND\n\n self.original_board = board.copy()", "def initGameState(self):\n print(\"Setting game state: \")\n self.playGUI = GUI()\n self.playGUI.drawBoard(self.player)", "def initGame(self):\n self.map = {}\n self.blocks = Group()\n self.Coins =Group()\n self.players = Group()\n self.player1 = Player(1525,75,2)\n self.players.add(self.player1)\n if self.playernum == 2:\n self.player2 = Player(75,825,1)\n self.players.add(self.player2)\n else:\n self.player2 = False", "def __init__(self, username, password, bot, channel):\n super().__init__(username, password)\n\n self.queue = deque()\n self.ingame_cog = Ingame(bot)\n\n self.bot = bot\n 
self.channel = channel\n self.chat_breakout = False\n self.loop = asyncio.get_event_loop()\n self.ingame_cog.is_pycraft_instance = True", "def __init__(self):\n\n pyxel.init(windowWidth, windowHeight)\n\n # generates randomly ordered list of [0, 1, 2, 3, 4, 5, 6, 7]\n self.bag = sample(list(range(7)), 7)\n\n # generates a block from last element of self.bag into self.blocks\n self.block = Block(blockData[self.bag.pop()])\n\n pyxel.run(self.update, self.draw)", "def __init__(self, game):\n self.__game = game\n\n # Init root window\n self.__root = Tk()\n self.__root.title(\"Ristinolla\")\n self.__root.resizable(width=False, height=False)\n self.__root.configure(bg=Color.MID_TONE)\n\n # Interface components\n self.__infobar = InfoBar(self.__root)\n self.__buttonbar = ButtonBar(self.__root, self)\n self.__tilegrid = TileGrid(self.__root, game, self)\n\n # Pack things up\n self.__buttonbar.pack(\n padx=(Pad.BORDER_PADDING, Pad.BORDER_PADDING),\n pady=(Pad.BORDER_PADDING, Pad.BORDER_PADDING),\n fill=X,\n )\n self.__tilegrid.pack(\n padx=(Pad.GRID_PADDING, Pad.GRID_PADDING),\n pady=(0, 0),\n )\n self.__infobar.pack(\n padx=(Pad.BORDER_PADDING, Pad.BORDER_PADDING),\n pady=(Pad.BORDER_PADDING, Pad.BORDER_PADDING),\n fill=X,\n )", "def __init__(self, size, board):\n self.BoardSize = size #the size of the board\n self.CurrentGameBoard= board #the current state of the game board", "def __init__(self):\n #screen Settings\n self.screen_width = 1024\n self.screen_height = 768\n self.bg_color = (32, 32, 32)\n\n #rocket settings\n self.rocket_speed = 1\n\n #laser Settings\n self.laser_speed = 1.0\n self.laser_width = 3\n self.laser_height = 15\n self.laser_color = (0, 255, 255)\n self.lasers_allowed = 3", "def __init__(self, players):\n\n # Instantiate a Players object with the players queue\n self._players = Players(players)\n # Instantiate the Die to be used for the current game\n self._die = Die()\n # Track the game status\n self._active_turn = True\n self._end_game = False", "def load_game(self):\n game = Game(self.w, self.h, self.screen)\n game.run()", "def setUp(self):\n self.game = TTTBoard(3)", "def __init__(self, ss_game):\n super().__init__()\n self.screen = ss_game.screen\n self.settings = ss_game.settings\n\n # Load the alien image and set its rect attribute.\n self.image = pygame.image.load('images/alien_ship.png')\n self.rect = self.image.get_rect()\n\n # Start each new alien at a random position on the right side\n # of the screen.\n self.rect.left = self.screen.get_rect().right\n # The farthest down the screen we'll place the alien is the height\n # of the screen, minus the height of the alien.\n alien_top_max = self.settings.screen_height - self.rect.height\n self.rect.top = randint(0, alien_top_max)\n\n # Store the alien's exact horizontal position.\n self.x = float(self.rect.x)", "def __init__(self, game: models.Game):\n self.game = game", "def __init__(self):\n\n self.controller = None\n\n self.game_running = False\n self.menu_view_running = False\n self.end_game_running = False", "def __init__(self, ai_game):\n\t\tsuper().__init__()\n\t\tself.screen = ai_game.screen\n\n\t\t# load the virus image and set its rect attribute\n\t\tself.image = pygame.image.load('images/virus.png')\n\t\tself.image = pygame.transform.scale(self.image, (50, 50))\n\t\tself.rect = self.image.get_rect()\n\n\t\t# start each new virus near the3 top left of the screen\n\t\tself.rect.x = self.rect.width\n\t\tself.rect.y = self.rect.height\n\n\t\t# store the virus' exact horizontal position\n\t\tself.x = 
float(self.rect.x)", "def __init__(self, player, gamestate):\n self.player = player\n self.gamestate = gamestate", "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n self.init_cl_game()\n else:\n self.init_pygame()", "def __init__(self):\n # Screen settings\n self.screen_width = 400\n self.screen_height = 300\n self.bg_color = (230, 230, 230)\n\n self.rocket_speed_factor= 1.5", "def __init__(self, width, height, title):\n ## INIT FUNCTION ##\n super().__init__(width, height, title)\n\n ## APPENDING THE SPRTIES ##\n self.shape_list = None\n self.num_key = 0\n\n self.win = arcade.load_texture(\"Numbers/won.png\")\n self.lost = arcade.load_texture(\"Numbers/lost.png\")\n\n # Define variables to check for completeness and accuracy\n self.done = False\n self.correct = False\n self.incorrect = False\n\n self.current_selected = None\n\n # If continuing saved game, convert strings from saved game file to lists and set equal to self.grid and self.fixed_answer\n if new == False:\n self.fixed_answer = Cameron.str_to_list(answer)\n self.grid = Cameron.str_to_list(progress)\n # If starting new game, generate unique board and save solution to text file\n elif new == True:\n self.board = SuDoku(SIZE, (DIV_ROW, DIV_COL), difficulty)\n self.answer = self.board.get_solution()\n self.grid = self.board.get_puzzle()\n self.fixed_answer = self.answer\n\n ## GENERATES BACKGROUND ##\n arcade.set_background_color(arcade.color.BLACK)\n self.recreate_grid()", "def __init__(self):\n\n # Window starting position\n x = 200\n y = 30\n os.environ[\"SDL_VIDEO_WINDOW_POS\"] = \"%d,%d\" % (x, y)\n\n pygame.init()\n # Init window\n self.window = Window()\n # Flag that defines if the program is running or not\n self.running = True\n if Settings.MENU_ENABLED:\n self.main_menu = MainMenu(self.window)\n self.main_loop()", "def __init__(self):\n self._pos = Vector2(250, 250)\n self._color = (randint(0, 255), randint(0, 255), randint(0, 255), 255)\n\n self._ticks_alive = 0\n self._dead = False", "def __init__(self):\n\n self.score = 0\n self.game_over = False\n # Create sprite lists\n self.block_list = pygame.sprite.Group()\n self.all_sprites_list = pygame.sprite.Group()\n\n # Create the block sprites\n for i in range(50):\n block = Block()\n block.rect.x = random.randrange(SCREEN_WIDTH)\n block.rect.y = random.randrange(-300, SCREEN_HEIGHT)\n\n self.block_list.add(block)\n self.all_sprites_list.add(block)\n\n self.player = Player()\n self.all_sprites_list.add(self.player)", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.screen = None\n self.engine = None\n self.engines = []\n self.i_e = InputEngine()\n self.e_e = EventEngine(self.i_e)", "def __init__(self):\n self.board = Board()\n #self.player1 = player1\n #self.player2 = player2\n self.winner = None" ]
[ "0.7896587", "0.7875345", "0.7770364", "0.7609544", "0.7590157", "0.75064385", "0.7486412", "0.73641217", "0.7349547", "0.7331177", "0.72932756", "0.72914916", "0.72420853", "0.72406125", "0.7235098", "0.72264135", "0.72215843", "0.7206377", "0.7200916", "0.71846515", "0.71485543", "0.710209", "0.70915926", "0.708712", "0.7081876", "0.70797014", "0.70778817", "0.7070457", "0.7069426", "0.7052748", "0.7030845", "0.6991678", "0.6970932", "0.6969144", "0.69585747", "0.69575423", "0.69366014", "0.6932687", "0.69140196", "0.69075495", "0.68997204", "0.68865883", "0.6879433", "0.6878621", "0.6865895", "0.6861704", "0.68540144", "0.68532187", "0.6838574", "0.6837322", "0.68371546", "0.6826296", "0.68240124", "0.6818378", "0.6774694", "0.6773854", "0.6773703", "0.67663103", "0.6764218", "0.67630273", "0.67597634", "0.675008", "0.6749758", "0.6747746", "0.6739166", "0.6737343", "0.67338747", "0.67329377", "0.67312455", "0.6714092", "0.67103803", "0.67102027", "0.6705418", "0.6704058", "0.6703717", "0.6701862", "0.66878664", "0.667608", "0.6670493", "0.6658866", "0.6654804", "0.664551", "0.66428214", "0.6640067", "0.6639462", "0.6639433", "0.6631666", "0.6630346", "0.6626176", "0.6623539", "0.66203064", "0.66190207", "0.6615943", "0.66143936", "0.6612043", "0.6611641", "0.6610879", "0.6604357", "0.6603932", "0.6602885", "0.66011226" ]
0.0
-1
get_board returns copy of the game's _board object
def get_board(self): return self._board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_board(self):\n return self.board.copy()", "def get_board(self):\n return copy.deepcopy(self.board)", "def get_game_board(self):\n return self.board", "def get_board(self):\r\n return self.board", "def get_board(self):\n return self.board", "def get_board(self):\n return self._board.get_board()", "def get_board(self):\n pass", "def getBoard(self):\n return self.board", "def get_board(self):\n\n return self._board", "def get_board(self):\n return self.squares", "def board(self):\n return copy.deepcopy(self._board)", "def test_get_board(self):\n copy1 = self.game.get_board()\n self.assertEqual(copy1._board, self.game._board)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERX\n copy2 = self.game.get_board()\n self.assertEqual(copy2._board, self.game._board)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERO\n copy3 = self.game.get_board()\n self.assertEqual(copy3._board, self.game._board)", "def get_game(self):\n return self._game_board", "def get_board(self) -> Tuple[Tuple[chr]]:\n # If we return the list, then the caller could modify the board,\n # so we want to convert it to a tuple so it's immutable\n return tuple(tuple(row) for row in self._board)", "def get_board_copy(self):\n board_copy = Board()\n board_copy._current_side_color = self._current_side_color\n board_copy._other_side_color = self._other_side_color\n board_copy._rubrics = copy.deepcopy(self._rubrics)\n\n # populate the dict with the copies of the objects:\n for x in range(8):\n for y in range(8):\n piece = board_copy._rubrics[x][y]\n if piece.piece_type != PieceType.PLACEHOLDER:\n board_copy._pieces[piece.color][piece.name] = piece\n\n return board_copy", "def board(self):\r\n return Board(self)", "def board(self):\r\n return Board(self)", "def board(self):\r\n return Board(self)", "def board(self):\r\n return Board(self)", "def board(self):\r\n return Board(self)", "def copy(self):\r\n copy_board = Board(self._squareCount, self._pebbleCount)\r\n copy_board.squares = [list(row) for row in self.squares]\r\n return copy_board", "def get_board():\r\n try:\r\n get_board_property('BOARD')\r\n except:\r\n logging.info(\"board property not found\")\r\n return -1", "def get_board(self):\n output_board = [[self.search(x, y).output() for x in range(self.width)] for y in range(self.height)]\n return output_board", "def load(self):\n data = self.db.select_single_row(\n \"SELECT board_class, width, height, tile_size FROM board\")\n self.board.columns = int(data[1])\n self.board.rows = int(data[2])\n self.board.tile_size = int(data[3])\n self.board._loaded_from_db = True\n self.board.switch_board(self.board)\n return self.board", "def copy(self):\n return type(self)(self.game_board.copy(), self.current_piece)", "def board(self):\n board = []\n if self.flop:\n board.extend(self.flop)\n if self.turn:\n board.append(self.turn)\n if self.river:\n board.append(self.river)\n return tuple(board) if board else None", "def __deepcopy__(self, memodict={}) -> 'Board':\r\n squares: Dict[Pos2D, Square] = deepcopy(self.squares)\r\n round_num: int = self.round_num\r\n phase: GamePhase = self.phase\r\n winner: PlayerColor = self.winner\r\n\r\n return Board(squares, round_num, phase, winner)", "def clone(self):\n copy = Board(self.game)\n for old_piece in self.game_pieces:\n copy.game_pieces.append(old_piece.clone())\n \n return copy", "def get_tile(self, row, col):\n # replace with your code\n return 
self.board[row][col]", "def relative_board(self):\n board = self.game.board\n if self.player_name == 'A':\n return board\n # Revert the board\n return board[6:] + board[:6]", "def get_game(self):\n return MasterMindBoard()", "async def my_board(self, pn):\n board = self.boards[pn]\n return board.lines()", "def copy(self):\r\n\t\tnewBoard = BoardClass()\r\n\r\n\t\tfor row in self.board:\r\n\t\t\tnewBoard.board.append(row[:])\r\n\t\tnewBoard.x = self.x\r\n\t\tnewBoard.y = self.y\r\n\t\tnewBoard.heuristic = self.heuristic\r\n\t\tnewBoard.n = self.n\r\n\t\tnewBoard.hType = self.hType\r\n\t\tnewBoard.steps = self.steps\r\n\r\n\t\treturn newBoard", "def getGrid(self):\n\n return self.board", "def getResult(board, action):\n result_board = deepcopy(board)\n player = getPlayer(board)\n result_board[action[0]][action[1]] = player\n return result_board", "def get(self, index):\n return self.board[index]", "def get_board(board_id):\n all_boards = [board for board in GRAPH_DB.find(\"board\")]\n board = filter(lambda b: b._id == board_id, all_boards)[0]\n return {\"ladders\": from_hackerrank_paths(board[\"ladders\"]),\n \"snakes\": from_hackerrank_paths(board[\"snakes\"])}", "def getMove(self, board):\n pass", "def compute_board(self):\n return self._compute_board", "def GetBoard(self):\n if self._board is None:\n self._board = self.LsbReleaseValue(\n key='CHROMEOS_RELEASE_BOARD', default=None)\n return self._board", "def __repr__(self):\n return f'Board({ self.board !r})'", "async def _boardgetter(self, global_board: bool) -> None:\n if global_board:\n self.cached_global_leaderboard = await AocGlobalLeaderboard.from_url()\n else:\n self.cached_private_leaderboard = await AocPrivateLeaderboard.from_url()", "def show_board(self):\n print(self.game_board)", "def copy(self):\n\t\tb = Board(self.size, self.end_count)\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tb.tiles[x][y] = self.tiles[x][y]\n\t\treturn b", "def get_board(self, request):\n try:\n player = Player.query(Player.name == request.player_name).get()\n game = gameutils.get_by_urlsafe(request.urlsafe_key, Game)\n board = Board.query(Board.player == player.key and Board.game == game.key).get()\n\n if not board:\n raise endpoints.NotFoundException(\n 'The Players Board for the selected game is not found')\n gameutils.log_board_on_console(board)\n except ValueError:\n raise endpoints.BadRequestException('please verify the information '\n 'of the second player')\n\n # Use a task queue to update the average attempts remaining.\n # This operation is not needed to complete the creation of a new game\n # so it is performed out of sequence.\n\n return StringMessage(message='Board Found and printed in the console'.format(request.player_name))", "def display_board(self):\n print(self.game_board)", "def boards(self):\r\n return Boards(self)", "def copy(board):\r\n height = len(board)\r\n width = len(board[0])\r\n\r\n copyBoard = createBoard(width, height)\r\n for row in range(height):\r\n for col in range(width):\r\n copyBoard[row][col] = board[row][col]\r\n return copyBoard", "def get_board(self, id: str = None, name: str = None, *args) -> en.Board:\n \n if id != None and name != None:\n raise MondayClientError('too_many_parameters', \"Unable to use both 'id' and 'name' when querying for a board.\")\n if id == None and name == None:\n raise MondayClientError('not_enough_parameters', \"Either the 'id' or 'name' is required when querying a board.\")\n if id != None: \n return self.get_board_by_id(id)\n else:\n return 
self.get_board_by_name(name)", "def copy(board):\r\n\theight = len(board)\r\n\twidth = len(board[0])\r\n\tnewBoard = createBoard(height, width)\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tnewBoard[row][col] = board[row][col]\r\n\treturn newBoard", "def get_piece(self, square):\n return self.board[square.row][square.col]", "def get_piece(self, square):\n return self.board[square.row][square.col]", "def get_all_boards():\n return [board for board in GRAPH_DB.find(\"board\")]", "def board(self, board_id):\r\n return Board(self, board_id)", "def get_tile(self, row, col):\r\n\r\n return self._board[row][col]", "def result(board, action):\n # if board[action[0]][action[1]] != EMPTY:\n # raise ValueError\n \n copy_board = copy.deepcopy(board)\n copy_board[action[0]][action[1]] = player(copy_board)\n return copy_board", "def copy_board(self):\n board_copy = [[' '] * 20 for _ in range(20)]\n for i in range(20):\n for j in range(20):\n board_copy[i][j] = self._board[i][j]", "def result(board, action):\n copyboard=copy.deepcopy(board)\n i,j=action\n if(copyboard[i][j]!=EMPTY):\n raise Exception(\"invalid action\")\n else:\n copyboard[i][j]=player(board)\n return copyboard\n raise NotImplementedError", "def result(board, action):\n # Ensure manipulations of hypothetical board don't alter current board values\n possible_board = copy.deepcopy(board)\n current_player = player(possible_board)\n\n # Generate boards for all possible moves by current player\n if action in actions(possible_board):\n possible_board[action[0]][action[1]] = current_player\n return possible_board\n\n raise Exception(\"Invalid move.\")", "def get(self, layer, row, column):\n if layer < 0 or row < 0 or column < 0:\n raise game.InvalidMoveException('The position ({}) is outside of the board'.format([layer, row, column]))\n try:\n return self._state['visible']['board'][layer][row][column]\n except:\n raise game.InvalidMoveException('The position ({}) is outside of the board'.format([layer, row, column]))", "def get(self):\n # 8 timesteps, 6 piece types per player, 64 squares #FIXME: 1 timestep\n # 1 castling (which rooks can still castle)\n # 1 player color (1 if white, 0 if black)\n # 1 total move count\n # 1 moves without progress\n # TODO: add repetions (2): repetition count for that position (3 repitions is an autmatic draw)\n pieces = np.concatenate(self.boards)[::-1]\n pieces = np.concatenate(pieces)\n if len(pieces) == MAX_PIECE_INDEX:\n return pieces\n else:\n return np.concatenate((pieces, np.zeros(MAX_PIECE_INDEX-len(pieces), )))", "def clone(self):\n \n return TTTBoard(self.dim, self.reverse, self.board)", "def result(board, action):\n i, j = action\n new_board = copy.deepcopy(board)\n if board[i][j]:\n raise ValueError\n else:\n new_board[i][j] = player(board)\n return new_board", "def get_board(self):\n for i in range(20):\n for j in range(20):\n print(self._board[i][j], end='|')\n print()", "def getMove(self, board):\r\n raise NotImplementedError(\"must be implemented in subclass\")", "def result(board, action):\n if board[action[0]][action[1]] != EMPTY:\n raise RuntimeError(\"Invalid action on board\")\n else:\n player_id = player(board)\n new_board = copy.deepcopy(board)\n new_board[action[0]][action[1]] = player_id\n return new_board", "def result(board, action):\n try:\n if action in actions(board):\n copy_board = copy.deepcopy(board)\n i, j = action\n player_turn = player(board)\n copy_board[i][j] = player_turn\n print(copy_board)\n return copy_board\n else:\n raise IndexError\n except 
IndexError:\n print(\"Invalid move\")", "def clone(self):\n return TTTBoard(self._dim, self._reverse, self._board)", "def get_state(self):\n return np.append(self.game.game_board.get_board(),\n [self.game.player_1.color, self.game.player_2.color])[None, :]", "def api_print_board(self):\n print(self.board)", "def get_tile(self, row, col):\r\n value = self.board[row][col]\r\n return value", "def copy_board(self, temp_board):\r\n board2 = []\r\n\r\n for ele in temp_board:\r\n board2.append(ele)\r\n\r\n return board2", "def get_piece(self, row, column):\n return self._board[row][column]", "def _check_board(self):\n return self.game_board.check_board(self.tetrino_set)", "def printBoard(self):", "def result(board, action):\n i, j = action\n\n # Deepcopy so that original board is not affected as it will be needed for recursion\n resultBoard = copy.deepcopy(board)\n\n if resultBoard[i][j] is not EMPTY:\n raise InvalidMoveException\n \n resultBoard[i][j] = player(board)\n return resultBoard", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]", "def set_board(board):", "def result(board, action):\n x = action[0]\n y = action[1]\n if x < 0 or x > 2 or y < 0 or y > 2 or not board[x][y] == EMPTY:\n raise ValueError\n temp_board = deepcopy(board)\n temp_board[x][y] = player(board)\n return temp_board", "def result(board, action):\n some_board = deepcopy(board)\n if terminal(some_board):\n return some_board\n if not action[0] in range (0,3) or not action[1] in range(0,3):\n raise NotImplementedError\n elif some_board[action[0]][action[1]] != EMPTY:\n raise NotImplementedError\n else:\n some_board[action[0]][action[1]] = player(some_board)\n return some_board", "def __str__(self):\r\n return str(self.board)", "def result(board, action):\n if action not in actions(board):\n raise Exception\n\n # Deep copy used because it's a list of lists\n board_copy = copy.deepcopy(board)\n board_copy[action[0]][action[1]] = player(board)\n return board_copy", "def result(board, action):\n board_copy = copy.deepcopy(board)\n board_copy[action[0]][action[1]] = player(board)\n\n return board_copy", "def result(board, action):\n board_cp=copy.deepcopy(board)\n turn=player(board_cp)\n x = action[0]\n y = action[1]\n if board_cp[x][y] != None:\n raise ActionInvalidError\n board_cp[x][y] = turn\n\n return board_cp", "def __init__(self, board):\n self.board = board", "def get(self,row,col):\r\n return self.puzzle[row][col]", "def get_game_cell(self, row, col):\n try:\n return GameCell.objects.get(game=self, row=row, col=col)\n except GameCell.DoesNotExist:\n return None", "def updated_board(board_w, board_h, piece_list, board, position):\n board_state = board.state\n new_board = Board(board_w, board_h, 1, piece_list, position)\n new_board.state = board_state\n return new_board", "def copy(self):\r\n board = []\r\n for row in self.board:\r\n board.append([x for x in row])\r\n return Puzzle(board)", "def __init__(self, turn, game):\n\t\tself.turn = turn\n\t\tself.game = game\n\t\tself.gameBoard = game.returnBoard()", "def your_board():\n board = [\n [5, 3, 0, 0, 7, 0, 0, 0, 0],\n [6, 0, 0, 1, 9, 5, 0, 0, 0],\n [0, 9, 8, 0, 0, 0, 0, 6, 0],\n [8, 0, 0, 0, 6, 0, 0, 0, 3],\n [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6],\n [0, 6, 0, 0, 0, 0, 2, 8, 0],\n [0, 0, 0, 4, 1, 9, 0, 0, 5],\n [0, 0, 0, 0, 8, 0, 0, 7, 9]\n ]\n return board", "def __getitem__(self, k):\n return self._board[k]", "def get_square(self, row, col):\n\n return self.board[row][col]", "def get_winner(self, 
board):\r\n ids = self.player_ids\r\n board_size = self.board_size\r\n return self.get_winner_c(board, ids, board_size)", "def get_next_board(self, delta: Delta) -> 'Board':\r\n\r\n # Sanity checks to ensure we're calling the method correctly.\r\n assert (self.squares[delta.move_target.pos].state == SquareState.OPEN)\r\n if (delta.move_origin is not None):\r\n assert (self.squares[delta.move_origin.pos].state\r\n == SquareState.OCCUPIED)\r\n\r\n next_board: Board = self.__deepcopy__()\r\n\r\n # Make sure that both the original and target squares are changed to\r\n # reflect change in the given delta.\r\n if (delta.move_origin is None):\r\n # This delta is a placement.\r\n next_board.squares[delta.move_target.pos].occupant = \\\r\n Piece(delta.player)\r\n else:\r\n # This delta is a movement.\r\n next_board.squares[delta.move_target.pos].occupant = \\\r\n delta.move_origin.occupant\r\n next_board.squares[delta.move_origin.pos].occupant = None\r\n next_board.squares[delta.move_origin.pos].state = SquareState.OPEN\r\n\r\n next_board.squares[delta.move_target.pos].state = SquareState.OCCUPIED\r\n\r\n # Update all the squares that had a killed piece.\r\n for pos in delta.killed_square_positions:\r\n next_board.squares[pos].occupant = None\r\n next_board.squares[pos].state = SquareState.OPEN\r\n\r\n for square in delta.eliminated_squares: # TODO: Make eliminated squares and new_corners also lists of positions instead?\r\n next_board.squares[square.pos].occupant = None\r\n next_board.squares[square.pos].state = SquareState.ELIMINATED\r\n\r\n for square in delta.new_corners:\r\n next_board.squares[square.pos].occupant = None\r\n next_board.squares[square.pos].state = SquareState.CORNER\r\n\r\n # Update the game state.\r\n next_board.round_num += 1\r\n next_board._update_game_phase()\r\n\r\n return next_board", "def get_boardjob(cls, board_id, job_id):\n\n try:\n return cls.query.filter_by(board_id=board_id, job_id=job_id).one()\n except orm.exc.NoResultFound:\n return None\n except orm.exc.MultipleResultsFound:\n db.session.delete(cls.query.filter_by(board_id=board_id, job_id=job_id).first())\n db.session.commit()\n return cls.query.filter_by(board_id=board_id, job_id=job_id).one()", "def get_tile(self, row, col):\n # replace with your code\n return self.grid[row][col]", "def utility(board):\n return utility_map[winner(board)]", "def get_move(self, board, color_to_play):\n move = self.MCTS.get_move(board, color_to_play, self.n_simualtions_per_move, self.exploration)\n self.update(move)\n return move" ]
[ "0.85879415", "0.8494425", "0.8370379", "0.8286644", "0.8171239", "0.81531906", "0.8135133", "0.80969197", "0.7896076", "0.784677", "0.76426995", "0.7624993", "0.7480111", "0.7113091", "0.6975937", "0.68170214", "0.68170214", "0.68170214", "0.68170214", "0.68170214", "0.6770016", "0.6745387", "0.6738528", "0.67180943", "0.6620454", "0.6581623", "0.6569444", "0.65632427", "0.6557972", "0.6545434", "0.65311027", "0.65182376", "0.6500214", "0.64985883", "0.6458161", "0.6440319", "0.64226335", "0.6420336", "0.6415822", "0.6396294", "0.63763326", "0.63714546", "0.63573104", "0.63432807", "0.6340305", "0.6295524", "0.6282976", "0.625391", "0.62282926", "0.62267417", "0.6226602", "0.6226602", "0.6181274", "0.6177995", "0.6168873", "0.615843", "0.61462456", "0.613918", "0.6137095", "0.6127051", "0.6112473", "0.61063105", "0.61007017", "0.6095364", "0.6090187", "0.608517", "0.60815805", "0.60762537", "0.6059621", "0.6043483", "0.60326993", "0.60183424", "0.6008758", "0.59780264", "0.5965057", "0.59565455", "0.5945409", "0.5944174", "0.59420466", "0.5933244", "0.5930666", "0.5925991", "0.5923917", "0.5920457", "0.59197897", "0.5916913", "0.59123963", "0.59119844", "0.5909512", "0.5907566", "0.5906468", "0.5904633", "0.5901277", "0.58941936", "0.58929867", "0.5879262", "0.58790547", "0.58773106", "0.58494663" ]
0.8100259
8
get_score returns the current score
def get_score(self): return self._score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def getScore(data):\n return score", "def get_score(self):\n return self.__score", "def get_score(self):\r\n return self.lcp.get_score()", "def getScore(self):\r\n return self._score", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def get_score(self):\r\n return None", "def get_scores(self):\n return self.score", "def getScore(self):\n return self._score", "def get_score(self):\n\n return self._score", "def get_score(self, score_index) -> float:\n return self._scores[score_index - 1]", "def get_score(self) -> int:\n return self.rstate.score()", "def get_score(self):\n return float(self._score)", "def score(self):", "def get_score(self, game_state):\n if self.red:\n return game_state.get_score()\n else:\n return game_state.get_score() * -1", "def get_score(self):\r\n if self.is_complete():\r\n score = 1\r\n elif self.is_half_complete():\r\n score = 0.5\r\n else:\r\n score = 0\r\n return {'score': score,\r\n 'total': self.max_score()}", "def get_score(self):\r\n score = self.latest_score()\r\n return {'score': score if score is not None else 0,\r\n 'total': self._max_score}", "def get_score(self, a, b):\n ### FILL IN ###", "def getScore(self,board):\n return board.getScore()[self.tile]", "def get_r_score(self):\n return self.r_score", "def get_score(self):\n return tuple(self.score)", "def score(self):\n return None", "def get_g_score(self):\n return self._g_score", "def getScore(self, gameState):\n\n if (self.red):\n return gameState.getScore()\n else:\n return gameState.getScore() * -1", "def disp_score():", "def test_get_score(self):\r\n\r\n score_dict = self.get_score(True, 3, 3)\r\n\r\n # Score should be 1.0.\r\n self.assertEqual(score_dict[\"score\"], 1.0)\r\n\r\n # Testing score after data is stored in student_data_for_location in xmodule.\r\n _score_dict = self.peer_grading.get_score()\r\n\r\n # Score should be 1.0.\r\n self.assertEqual(_score_dict[\"score\"], 1.0)", "def get_score(self):\n return np.max(self._scores) if self._scores is not None else self._score_history[-1]", "def readScore(self):\n return self.zmwMetric(\"ReadScore\")", "def scoreEvaluationFunction(currentGameState):\r\n return currentGameState.getScore()", "def score(self) -> int:\n return self._score", "def update_score():\n pass", "def get_score(self):\n\n sql = \"SELECT score FROM Users WHERE username = '\" + self.username + \"'\"\n self.cursor.execute(sql)\n return self.cursor.fetchall()[0][0]", "def getScore(self, i):\n return self.scores[i - 1]", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n 
return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def get_score(self, red_score, blue_score):\n if red_score < blue_score:\n return 0\n elif red_score > blue_score:\n return 1\n else:\n return 0.5", "def calc_score(score):\n if not score:\n return 0\n dbot_score = 1\n if score >= 95:\n dbot_score = 3\n elif score >= 75:\n dbot_score = 2\n return dbot_score", "def get_score(self, card_index: int = 0) -> int:\n return self.get_score_list[card_index]", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 15.0)\r\n self.assertEqual(score_dict['total'], 5.0)", "def score(self) -> str:\n return self._score", "def scoreEvaluationFunction(currentGameState):\n\treturn currentGameState.getScore()", "def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0", "def scoring(self):\n pass", "def score(self):\n raise NotImplementedError()", "def getSubmissionScore(submission):\r\n 
return submission.score", "def get_score(cfg):\n key = (cfg.mut, cfg.pH)\n return lazy_load(SCORE_MAP, key, read_score, get_score_path, cfg)", "def get_current_score(self):\n\n # Return the player's current turn score\n return self._current_score", "def get_r2_score(self):\n return self.r2_score", "def get_score(self, student_answers):\r\n pass", "def get_score(self):\r\n max_score = None\r\n score = None\r\n\r\n #The old default was None, so set to 1 if it is the old default weight\r\n weight = self.get_weight()\r\n if self.is_scored:\r\n # Finds the maximum score of all student attempts and keeps it.\r\n score_mat = []\r\n for i in xrange(0, len(self.task_states)):\r\n # For each task, extract all student scores on that task (each attempt for each task)\r\n last_response = self.get_last_response(i)\r\n score = last_response.get('all_scores', None)\r\n if score is not None:\r\n # Convert none scores and weight scores properly\r\n for z in xrange(0, len(score)):\r\n if score[z] is None:\r\n score[z] = 0\r\n score[z] *= float(weight)\r\n score_mat.append(score)\r\n\r\n if len(score_mat) > 0:\r\n # Currently, assume that the final step is the correct one, and that those are the final scores.\r\n # This will change in the future, which is why the machinery above exists to extract all scores on all steps\r\n scores = score_mat[-1]\r\n score = max(scores)\r\n else:\r\n score = 0\r\n\r\n if self._max_score is not None:\r\n # Weight the max score if it is not None\r\n max_score = self._max_score * float(weight)\r\n else:\r\n # Without a max_score, we cannot have a score!\r\n score = None\r\n\r\n score_dict = {\r\n 'score': score,\r\n 'total': max_score,\r\n }\r\n\r\n return score_dict", "def score(self, n):\r\n \r\n if self.scores:\r\n return self.scores[n]\r\n else:\r\n return None", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 0)\r\n self.assertEqual(score_dict['total'], 1)", "def updateScore(score):\n return score + 1", "def scoreEvaluationFunction(gameState):\n return gameState.getScore()", "def score():\n factor = 10\n current = (qno - wrong - 1) * factor\n return current", "def get_score(score_map, test_result):\n if test_result < score_map[20]:\n return int((test_result / score_map[20]) * 20)\n elif test_result < score_map[40]:\n return int(20 + (test_result - score_map[20]) / (score_map[40] - score_map[20]) * 20)\n elif test_result < score_map[60]:\n return int(40 + (test_result - score_map[40]) / (score_map[60] - score_map[40]) * 20)\n elif test_result < score_map[85]:\n return int(60 + (test_result - score_map[60]) / (score_map[85] - score_map[60]) * 20)\n elif test_result < score_map[100]:\n return int(85 + (test_result - score_map[85]) / (score_map[100] - score_map[85]) * 20)\n else:\n return 100", "def get_score(self, player):\n if player in self.player_scores:\n return self.player_scores[player]\n else:\n raise Exception(\"Player not in score list\")" ]
[ "0.8582769", "0.8516198", "0.8516198", "0.8516198", "0.8295523", "0.82583547", "0.8242513", "0.816506", "0.805192", "0.80405056", "0.8029793", "0.8004031", "0.7996733", "0.783279", "0.77552474", "0.7745425", "0.76711756", "0.7645722", "0.7591186", "0.7586955", "0.753339", "0.75226", "0.7481852", "0.74591094", "0.7400067", "0.7365106", "0.7358815", "0.7350135", "0.7348656", "0.7298728", "0.72827446", "0.72780097", "0.7269153", "0.72654015", "0.7261748", "0.72371745", "0.7209892", "0.7209892", "0.7209892", "0.7209892", "0.7209892", "0.7209892", "0.7207961", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.7194169", "0.719162", "0.7177647", "0.7171014", "0.71635616", "0.7149911", "0.7138595", "0.71336985", "0.7130173", "0.7113545", "0.71113884", "0.7103779", "0.708466", "0.70831144", "0.7074944", "0.7071922", "0.70671195", "0.70662636", "0.7065014", "0.7027623", "0.701955", "0.69995725", "0.6997117" ]
0.8245853
8
set_score increments the score by change; change can be negative
def set_score(self, change):
    self._score = self._score + change
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_score(self,score):\n self._score = score", "def set_score(self, score):\n self._score = score", "def update_score():\n pass", "def set_score(self, a, b, score):\n ### FILL IN ###", "def set_score(self, score):\n # Update the score display\n self.score = score\n self._prep_score()\n\n # Update the high score if required\n if self.score > self.high_score:\n self.high_score = score\n self._prep_high_score()", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def updateScore(self, score):\n self.__score += score", "def update_score(self, score: int) -> int:\n self.score += score\n return self.score", "def updateScore(score):\n return score + 1", "def set_score(self, score_index: int, score: float) -> None:\n self._scores[score_index - 1] = score", "def increase_score(self):\n self.score += 1", "def change_score(self, change: float=1):\n self._score += change", "def change_score(self, change: float = 1):\n self._score += change", "def setScore(self, i, score):\n self.scores[i - 1] = score", "def increase_score(self, increase):\n if increase > 0:\n self.__score += increase", "def update_turn_score(self, score):\n\n # Increment the attribute by the passed value\n self._current_score += score", "def score(self, score: str):\n\n self._score = score", "def set_score(self, points):\n self.score += points", "def adjust_score(self):\n self.score += game.temporary_score", "def reset_score(self):\n self._score = p.params['initial_score']", "def add_score(self, score):\n self._score += score", "def update_score(self, board):\n self._score += 1", "def setScore(self, score=None):\r\n self._score = score\r\n self.ids[\"_scoreDisplayer\"].displayScore(score) \r\n self.ids[\"_emailSender\"]._score = self._score\r\n self.ids[\"_scoreSaver\"]._score = self._score\r\n self.ids[\"_MidiPlayer\"]._score = self._score", "def update_score(score, role):\n if role == 'winner':\n score = score + 1\n if role == 'loser':\n score = score - 1\n return score", "def set_rewards_score(self, _score: Address) -> None:\n if self.msg.sender == self.owner:\n self._rewards_score.set(_score)", "def update_g_score(self, value):\n self.g_score = value", "def min_score(self, score):\n self._evaluated = False\n self._min_score = score\n return self", "def __init__(self, score=0):\n self.score = score", "def set_score(self, score):\n self._score_bar.config(text=\"Score: \" + format(score))", "def reset_score(self):\n self.x_score = 0\n self.o_score = 0", "def __init__(self, score = 0):\n self.score = score", "def update_scores(self, score):\n self.result_list.append(score)\n\n if self.best_score == 0 and self.worst_score == 0:\n self.best_score = score\n self.worst_score = score\n\n if score < self.best_score:\n self.best_score = score\n\n if score > self.worst_score:\n self.worst_score = score", "def scores(self, value):\n self._scores = value", "def bcp_player_score(self, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player['score'] = int(value)", "def __init__(self):\r\n self.score = 0", "def set_dividends_score(self, _score: Address) -> None:\n if self.msg.sender == self.owner:\n self._dividends_score.set(_score)", "def _tally(self, score):\n self._score[self._turn] += score", "def decrease(self):\n self.score -= self.score", "def _adjust_score(self, my_choice, their_choice):\n self._score += p.params['score_matrix'][my_choice][their_choice]\n self._score -= p.params['loss_per_tick']", "def 
set_score(self, proxy, score):\n #print(\"proxy can be use: %s\" % proxy)\n mapping = { proxy : score }\n return self.database.zadd(self.key, mapping)", "def l_point(self):\n self.l_score += 1\n self.update_scoreboard()", "def set_token_score(self, _score: Address) -> None:\n if self.msg.sender == self.owner:\n self._token_score.set(_score)", "def update_score(best_score: int, new_score: int) -> int:\n if new_score > best_score:\n return new_score\n else:\n return best_score", "def increase_score(self):\n\n old_score = self.get_score()\n new_score = old_score + 1\n sql = \"UPDATE Users SET score = ? WHERE username = ?\"\n self.conn.execute(sql, (new_score, self.username))\n self.conn.commit()", "def setHighScore(self, score):\n if (self.__highScore < score):\n self.__highScore = score\n return True\n else:\n return False", "def update_score(self, node, addToScore):\r\n current_score = 0\r\n score_string = self.parser.getAttribute(node, 'gravityScore')\r\n if score_string:\r\n current_score = int(score_string)\r\n\r\n new_score = current_score + addToScore\r\n self.parser.setAttribute(node, \"gravityScore\", str(new_score))", "def r_point(self):\n self.r_score += 1\n self.update_scoreboard()", "def update_score(self):\n td = self.created - datetime.datetime(1970, 1, 1)\n epoch_seconds = td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)\n order = math.log(max(abs(self.points), 1), 10)\n sign = 1 if self.points > 0 else -1 if self.points < 0 else 0\n seconds = epoch_seconds - 1134028003\n self.score = round(order + sign * seconds / 45000, 7)", "def update_score(self, node, addToScore):\n current_score = 0\n score_string = self.parser.getAttribute(node, 'gravityScore')\n if score_string:\n current_score = int(score_string)\n\n new_score = current_score + addToScore\n self.parser.setAttribute(node, \"gravityScore\", str(new_score))", "def add_to_score(self, to_add):\n self.score += to_add", "def add_score(self, score_to_add):\n self.score += score_to_add\n if self.score // self.level >= 20:\n self.level += 1\n self.speed *= self.factor\n # Also redraw all pixels because they now change color\n self.screen.grid()\n self.screen.block()\n self.screen.next()\n # Refresh the data on screen\n self.screen.data()", "def score(self):", "def update_scores(self):\n self.score[0] = (-1)*sum(self.board[self.board == -1])\n self.score[1] = sum(self.board[self.board == 1])\n #self.score[i] = sum(1 for j in range(len(stones_on_board)) if stones_on_board[j] == i)", "def commit_score(self):\n\n # Update the player's total score and total roll count\n self._total_score += self._current_score", "def update_score(self, engine, *args):\n #pdb.set_trace()\n self.score_label.text = \"Gold: {}/{}\".format(str(engine.score),\n str(engine.win_score))", "def update_score(self, mark):\n if mark == 'X':\n self.model.game_score[self.model.player_1] += 1\n else:\n self.model.game_score[self.model.player_2] += 1", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def setTreasureScore(self, scores):\n if not self.hasLocalToon: return\n self.notify.debug(\"setTreasureScore: %s\" % scores)\n\n for i in range(len(self.scorePanels)):\n self.scorePanels[i].setScore(scores[i])", "def update_score(self, score_point: int):\r\n self._score_point = score_point\r\n self._update_score() # change the visual display of points for the player\r", "def add_score(score):\n global SCORE\n SCORE = SCORE + score\n # update the display\n mvaddstr(1, 2, \"Score:\", color_pair(HEADING_COLOUR) 
| A_BOLD)\n mvaddstr(1, 9, \"%d\" % SCORE, color_pair(TEXT_COLOUR) | A_BOLD)", "def score(self, score: FAIRResultCommonScore):\n if score is None:\n raise ValueError('Invalid value for `score`, must not be `None`') # noqa: E501\n\n self._score = score", "def update(self, game):\n super().update(game)\n self.nn_def.set_score(self.score)", "def to_score(self):\n self._bottom_tab(2)\n self._goto(\"score\")", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def _score_has_changed(self):\n print('The score for {} has changed'.format(self.team))\n self.relay_controller.activate_solenoid()", "def l_point(self):\n self.l_score += 1\n self.update()", "def double_score_exec(self):\n if self.double_score_count <= 5:\n self.score_multiple = 2\n else:\n self.score_multiple = 1\n self.double_score_count = 0\n self.double_score_exist = False\n self.double_score_start = False", "def r_point(self):\n self.r_score += 1\n self.update()", "def update_activity_points(self, user_id,score):\n \n print(\"score :\"+str(score))\n\n if score<0:\n self.execute(TABELLE['activity_points']['update']['loose'],(score,user_id,))\n else:\n self.execute(TABELLE['activity_points']['update']['win'],(score, user_id,))", "def set_scores(self, scores):\n self.score = {k: v for k, v in scores.items()}", "def score_minus_one():\r\n # TODO: Avoid duplicated code with score_plus_one; have\r\n # both call a single add_to_score function.\r\n postid = request.values.get(\"postid\")\r\n con = get_db()\r\n con.execute(\"\"\"\r\n UPDATE posts SET score=score-1 WHERE postid=?;\r\n \"\"\",\r\n (postid,))\r\n con.commit()\r\n con.close()\r\n return redirect(url_for(\"display_top\"))", "def update_score_from_cmd(self, new_score, prev_score):\r\n if new_score is None:\r\n return # No change\r\n \r\n player = new_score[0]\r\n score = new_score[1]\r\n player.set_score(score)", "def increment_score(self, x=1):\n self.score += x\n styled_set_label_text(self.score_display, \"Score: \"+str(self.score))", "def increase(self, points):\n self.score += points", "def __convert_similarity_score(self, score):\n if 0 < score <= 1:\n new_score = (score - 1) * (-1)\n elif score <= 0:\n new_score = abs(score) + 1\n return new_score", "def score_update(scoreboard, compare):\r\n if compare == 'Victory':\r\n scoreboard['W'] += 1\r\n elif compare == 'Defeat':\r\n scoreboard['L'] += 1\r\n elif compare == 'Tie':\r\n scoreboard['T'] += 1", "def perform_set_score(responder, options):\n match = options['<match-id>']\n tla = options['<tla>']\n score = options['<score>']\n scores.set_match_score(match, tla, score)\n responder('Scored {0} points for {1} in match {2}'.format(score, tla, match))", "def check_high_score(self):\r\n if self.stats.score > self.stats.high_score:\r\n self.stats.high_score = self.stats.score\r\n self.prep_placar_score()", "def _update_score(self) -> None:\n\n # setting new score by iterating over players\n self.score_play[self.n_play_turns, ] = [\n self._score_table[(\n self.contract.level,\n self.contract.suit,\n self.tricks[i],\n self.contract.player_vulnerability[i],\n int(self.contract.double + self.contract.redouble)\n )]\n for i in range(NUM_PLAYERS)\n ]", "def updateScore(self, node, addToScore):\n currentScore = 0\n scoreString = node.attrib.get('gravityScore')\n if scoreString:\n currentScore = int(scoreString)\n \n newScore = currentScore + addToScore\n node.set(\"gravityScore\", str(newScore))", "def add_score(self, points: int) -> None:\n self.__score += 
points\n\n for rank in self.__ranks.keys():\n if self.__score >= rank:\n self.__level = self.__ranks[rank]\n else:\n break", "def fix_score(self,req):\n if self.kind in (\"album\",\"artist\"):\n self.update_score()\n req.message=\"score reset from child scores\"\n elif self.kind==\"track\":\n self.score=0\n for i in self.Play.list(page=self.uid):\n self.score+=i.times\n self.flush()\n req.message=\"score reset from plays table\"\n else:\n req.error= \"not a track, album, or artist\"\n return self.view(req)", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n if not self.score and self.id:\n self._compute_score()", "def clan_score(self, clan_score):\n\n self._clan_score = clan_score", "def applyScore(player, scoreEnum, dice):\n\n player.setScore(scoreEnum, getScoreGivenDice(scoreEnum, dice))\n player.removeFromRemaining(scoreEnum)", "def __call__(self, score, model):\n if self.best_score is None:\n # assign the best score and save the model at the end of the first epoch\n self.best_score = score\n self.save_checkpoint(model)\n elif score < self.best_score + self.delta:\n # if the score not increase of at least delta, increment the counter and if it reach the patience early stops\n self.counter += 1\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n # otherwise the score is better that the saved one, so replace the best score and save the model\n self.best_score = score\n self.save_checkpoint(model)\n self.counter = 0", "def calc_score(score):\n if not score:\n return 0\n dbot_score = 1\n if score >= 95:\n dbot_score = 3\n elif score >= 75:\n dbot_score = 2\n return dbot_score", "def record_latest_score(self, score):\r\n self.child_history[-1]['score'] = score", "def updateScore(self, player: int) -> None:\n\n if player == 1:\n self._score[0] += 1\n elif player == 2:\n self._score[1] += 1\n\n # logging\n logger.info(\"Player {winner} has scored a goal. 
Score: {score}\", winner=player, score=str(self._score))", "def _score(self, x, seq):\n pass", "def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score", "def match_score(self, match_score):\n\n self._match_score = match_score", "def set_score(self):\n if self.PotTax_intervention is None:\n if self.PotTax_reference is not None:\n self.score = (((self.PotTax_reference.sum().TFI - 29.33) /\n 1.4349) / 100)\n else:\n print(\"There is no Biosafe output to score\")\n return\n else:\n self.score = (((self.PotTax_intervention.sum().TFI - 29.33) /\n 1.4349) / 100)\n return", "def _handle_score(self) -> None:\n\n assert self._puck is not None\n assert self._score_regions is not None\n\n # Our puck might stick around for a second or two\n # we don't want it to be able to score again.\n if self._puck.scored:\n return\n\n region = ba.getcollision().sourcenode\n index = 0\n for index in range(len(self._score_regions)):\n if region == self._score_regions[index].node:\n break\n\n for team in self.teams:\n if team.id == index:\n scoring_team = team\n team.score += 1\n\n # Tell all players to celebrate.\n for player in team.players:\n if player.actor:\n player.actor.handlemessage(ba.CelebrateMessage(2.0))\n\n # If we've got the player from the scoring team that last\n # touched us, give them points.\n if (scoring_team.id in self._puck.last_players_to_touch\n and self._puck.last_players_to_touch[scoring_team.id]):\n self.stats.player_scored(\n self._puck.last_players_to_touch[scoring_team.id],\n 100,\n big_message=True)\n\n # End game if we won.\n if team.score >= self._score_to_win:\n self.end_game()\n\n ba.playsound(self._foghorn_sound)\n ba.playsound(self._cheer_sound)\n\n self._puck.scored = True\n\n # Kill the puck (it'll respawn itself shortly).\n ba.timer(1.0, self._kill_puck)\n\n light = ba.newnode('light',\n attrs={\n 'position': ba.getcollision().position,\n 'height_attenuated': False,\n 'color': (1, 0, 0)\n })\n ba.animate(light, 'intensity', {0: 0, 0.5: 1, 1.0: 0}, loop=True)\n ba.timer(1.0, light.delete)\n\n ba.cameraflash(duration=10.0)\n self._update_scoreboard()", "def test_set_score_scores(self, credit_dict, result):\n self.xblock.credit_dict = credit_dict\n self.xblock.set_score()\n self.assertEqual(self.xblock.score, result)", "def multiplyScore(self, multiplier):\n self.__score *= 1 + ( multiplier / 10 )\n self.__score = int(self.__score)", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def _update_score(self, score_msg, queuekey, system):\r\n _ = self.system.service(self, \"i18n\").ugettext\r\n new_score_msg = self._parse_score_msg(score_msg, system)\r\n if not new_score_msg['valid']:\r\n # Translators: the `grader` refers to the grading service open response problems\r\n # are sent to, either to be machine-graded, peer-graded, or instructor-graded.\r\n new_score_msg['feedback'] = _('Invalid grader reply. Please contact the course staff.')\r\n\r\n # self.child_history is initialized as []. record_latest_score() and record_latest_post_assessment()\r\n # operate on self.child_history[-1]. 
Thus we have to make sure child_history is not [].\r\n # Handle at this level instead of in record_*() because this is a good place to reduce the number of conditions\r\n # and also keep the persistent state from changing.\r\n if self.child_history:\r\n self.record_latest_score(new_score_msg['score'])\r\n self.record_latest_post_assessment(score_msg)\r\n self.child_state = self.POST_ASSESSMENT\r\n else:\r\n log.error((\r\n \"Trying to update score without existing studentmodule child_history:\\n\"\r\n \" location: {location}\\n\"\r\n \" score: {score}\\n\"\r\n \" grader_ids: {grader_ids}\\n\"\r\n \" submission_ids: {submission_ids}\").format(\r\n location=self.location_string,\r\n score=new_score_msg['score'],\r\n grader_ids=new_score_msg['grader_ids'],\r\n submission_ids=new_score_msg['submission_ids']\r\n )\r\n )\r\n\r\n return True" ]
[ "0.82878745", "0.8127291", "0.81113905", "0.8077499", "0.7966757", "0.796316", "0.796316", "0.796316", "0.79581094", "0.78421885", "0.7841102", "0.7821862", "0.78135276", "0.77125627", "0.7695137", "0.76055825", "0.75447255", "0.75010866", "0.7482682", "0.7436947", "0.74270767", "0.73933923", "0.7379702", "0.7360062", "0.72873527", "0.7276128", "0.72540087", "0.7168964", "0.7163298", "0.70986295", "0.7089039", "0.70742553", "0.70731777", "0.70393765", "0.7035791", "0.6983704", "0.6975537", "0.69727933", "0.695832", "0.69505686", "0.6908432", "0.6894788", "0.68764275", "0.6850725", "0.684004", "0.6832775", "0.68199676", "0.6812262", "0.681184", "0.68029743", "0.6788747", "0.6787589", "0.6787461", "0.678161", "0.6754265", "0.6733406", "0.6724694", "0.6718375", "0.6711779", "0.6711216", "0.67022866", "0.6693061", "0.66606843", "0.6650348", "0.66411895", "0.6635688", "0.66307765", "0.66290075", "0.66132945", "0.66045475", "0.6581651", "0.65761524", "0.65760124", "0.65652794", "0.65610754", "0.6520514", "0.6504211", "0.6498353", "0.6486531", "0.6485681", "0.64852494", "0.6484468", "0.64756674", "0.64738494", "0.6471405", "0.6445493", "0.64426804", "0.64366853", "0.64352554", "0.64195263", "0.64097023", "0.64089715", "0.63877493", "0.6380495", "0.63796085", "0.6378424", "0.63741827", "0.6370222", "0.63664186", "0.6338" ]
0.830666
0
move_ray is the primary function responsible for recursively moving a ray. Although it primarily looks after the action of the Ray.Ray class, it lives in the Game instance itself. This is how we determine the exit point of all rays: horizontal, vertical, or with detours.
def move_ray(self, ray):
    # look to the next spot in the ray's trajectory
    next_coordinates = ray.get_next_location()
    next_location = self._board.get_board_square(next_coordinates)

    # check for a collision - return if it occurs
    if ray.check_for_collision(next_location):
        return

    # if we didn't collide as we moved we need to check our diagonals for atoms
    ccw_diag_coordinates, cw_diag_coordinates = ray.get_diagonals()
    ccw_diagonal = self._board.get_board_square(ccw_diag_coordinates)
    cw_diagonal = self._board.get_board_square(cw_diag_coordinates)

    if ccw_diagonal.is_atom() or cw_diagonal.is_atom():
        # If we're on our first move and the immediate diagonals contain an atom we have a reflection
        if ray.get_current_location() == ray.get_origin_location():
            terminal_square = self._board.get_board_square(ray.get_current_location())
            # lets the ray know it's finished and the square that is its endpoint
            # self.end_ray(ray, terminal_square)
            return ray.record_edge_collision(terminal_square)
        # otherwise they cause a bend in the path
        else:
            # we have to calculate our trajectory based on the pull of the atoms in our path
            ray.recalculate_trajectory(ccw_diagonal, cw_diagonal)
            # get the coordinates of the next location in our new trajectory
            next_coordinates = ray.get_next_location()
            # determine if the next coordinate would result in a collision - return if it would
            if ray.check_for_collision(self._board.get_board_square(next_coordinates)):
                return

    # move the ray to the next step forward in its current trajectory
    ray.set_current_location(next_coordinates)

    # finally, recursively call our current function from the next step in its path.
    self.move_ray(ray)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shoot_ray(self, row, col):\n # Uses validate method to check if row,col are legal for ray entrance location\n if not self.valid_ray(row, col):\n return False\n # creates ray object from row, col integers\n ray = Ray(row, col)\n # checks if atom is in front of entrance position\n if not ray.can_continue(self.get_a_locations()):\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None\n # while there is no atom in front of ray and ray will not exit board --\n while ray.can_continue(self.get_a_locations()):\n ray.check_diags(self.get_a_locations())\n # moves ray forward one space\n ray.advance()\n # if ray will exit board by advancing --\n if not ray.on_board():\n # adjusts score if entrance/exit do not match prior entrances/exits\n self.mark_portal(ray.get_start(), ray.get_pos())\n # changes state to lose if score is now <= 0\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n # returns tuple of exit location\n return tuple(ray.get_pos())\n # if ray is blocked by atom --\n if not ray.no_atom(self.get_a_locations()):\n # changes state to lost if score is now <= 0\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None", "def follow(ray: Ray, scene: Scene, max_iters=1000, renderer=None) -> [Tuple[Ray, Decision]]:\n path = [(ray, Decision.EMIT)]\n idx = 0\n last_ray = ray\n while ray.is_alive:\n intersections = scene.intersections(ray.position, ray.direction)\n points, nodes = zip(*[(x.point, x.hit) for x in intersections])\n for ray, decision in step(ray, points, nodes, renderer=renderer):\n path.append((ray, decision))\n if points_equal(ray.position, last_ray.position) and np.allclose(ray.direction, last_ray.direction):\n raise TraceError(\"Ray did not move.\")\n last_ray = ray\n if idx > max_iters:\n raise TraceError(\"Ray got stuck.\")\n return path", "def step(ray, points, nodes, renderer=None):\n container, to_node, surface_node = ray_status(ray, points, nodes)\n min_point = ray.position\n max_point = points[0]\n \n dist = distance_between(min_point, max_point)\n _ray = ray\n for (ray, decision) in trace_path(ray, container, dist):\n if renderer:\n renderer.add_ray_path([_ray, ray])\n _ray = ray\n yield ray, decision\n\n if to_node is None and container.parent is None:\n # Case: Hit world node; kill ray here.\n ray = replace(ray, is_alive=False)\n yield ray, Decision.KILL\n elif points_equal(ray.position, max_point):\n # Case: Hit surface\n # NB The ray argument of `trace_surface` *must* be a ray on the surface of the \n # node and the returned ray must *not* be on the node!\n before_ray = ray\n _ray = ray\n for ray, decision in trace_surface(ray, container, to_node, surface_node):\n if renderer:\n renderer.add_ray_path([_ray, ray])\n _ray = ray\n yield ray, decision\n # Avoid error checks in production\n if __debug__:\n local_ray = ray.representation(surface_node.root, surface_node)\n if surface_node.geometry.is_on_surface(local_ray.position):\n logger.warning(\"(before) pos: {}\".format(before_ray.position))\n logger.warning(\"(after) pos: {}\".format(ray.position))\n raise TraceError(\"After tracing a surface the ray cannot still be on the surface.\")", "def shoot_ray(self, origin_row, origin_column):\n\n # get the the square object at row x column\n origin = self._board.get_board_square((origin_row, origin_column))\n\n # check that it is a valid \"edge\" to send a ray from\n origin_check = origin.is_edge()\n\n # if it's not then return false\n if origin_check == 
False:\n return False\n\n # if we pass the origin check create shoot a new Ray.Ray object from row x column\n new_ray = Ray.Ray(origin_row, origin_column)\n\n # let the square we shot from know its an orign square\n origin.set_originating_ray(new_ray)\n # Deduct 1 from the score since we now have on exit point\n self.set_score(-1)\n\n # while the ray object has a direction (will be set to none when it reaches an endpoint)\n # send it to the helper function that will move it\n while new_ray.get_direction() != None:\n self.move_ray(new_ray)\n\n # if we hit an exit point (other than through reflection) deduct the point for that\n terminus = new_ray.get_terminal_location()\n # check the the terminal point is an edge (hitting an atom returns none as terminus)\n\n if terminus != None:\n # check that the terminus is not a reflection, which shouldn't be counted twice\n terminal_square = self._board.get_board_square(terminus)\n terminal_square.set_terminating_ray(new_ray)\n if terminus != (origin_row, origin_column):\n self.set_score(-1)\n\n return terminus", "def shoot_ray(self, entry_x, entry_y):\r\n\r\n # check to make sure entry_x and entry_y are valid\r\n if (entry_x in [0, 9] or entry_y in [0, 9]) and \\\r\n self._board.get_board_item(entry_x, entry_y) != \"o\":\r\n\r\n exit_tup = self._board.find_exit(entry_x, entry_y)\r\n # returned 0 if hit\r\n if exit_tup == 0:\r\n # decrement entry only if not visited\r\n marker = self.get_hit_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n self._stats.dec_player_score(points)\r\n return \"Hit\"\r\n elif exit_tup == 1:\r\n # decrement entry only if not visited\r\n marker = self.get_reflect_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n\r\n self._stats.dec_player_score(points)\r\n\r\n return \"reflect\"\r\n else:\r\n # decrement both entry and exit if not already visited\r\n marker = self.get_color_marker()\r\n exit_x, exit_y = exit_tup\r\n circle_entry = self.calculate_entry_exit(entry_y, entry_x)\r\n circle_exit = self.calculate_entry_exit(exit_y, exit_x)\r\n marker.update_center(circle_entry, circle_exit)\r\n points = self._player.add_entry_exit((entry_x, entry_y),\r\n marker, exit_tup)\r\n\r\n self._stats.dec_player_score(points)\r\n return exit_tup\r\n else:\r\n # returns false if the shoot_ray point is invalid\r\n return \"Bad shot\"", "def move_to_exit(self, time_move=0.25):\n \n #While the agent is not on the exit, we keep going through the labyrinth\n while self.agent_node.labyrinth_position != self.exit_point.labyrinth_position:\n\n #We use breadth first search to create the tree with the distance of every node from the agent position\n self.breadth_first_search()\n node_to_move_on = self.find_node_to_move_on(self.exit_point)\n self.update_statistics_after_move(node_to_move_on)\n self.set_datas_after_move(node_to_move_on)\n\n #We clear the terminal to print the labyrinth with the new position of the agent\n clear = \"cls\" if platform.system() == \"Windows\" else \"clear\"\n os.system(clear)\n self.print_labyrinth()\n time.sleep(time_move)", "def moveBasedOnRetreatAction(self, time_passed):\n cpos = self.toScreenCoordinate()\n mpos = pygame.mouse.get_pos()\n toMouse = Vector2.from_points(cpos,mpos)\n toMouse.normalize()\n rheading 
= -toMouse\n \n heading = self.heading\n angle_between = heading.angle_between(rheading)\n if angle_between>=-30 and angle_between<=30:\n return\n \n distance = time_passed * self.speed\n movement = rheading * distance\n x = movement.get_x()\n y = movement.get_y()\n if not self.checkCollision(x, y) and self.checkValidCoord(x, y):\n self.move(x, y)", "def rayShooting():\r\n \r\n \r\n if nbRay==1:\r\n maxi=1\r\n mini=1\r\n peaceofAngle=angleMax\r\n #to trace one ray at angleMax\r\n else:\r\n maxi=(nbRay-1)/2\r\n mini=-maxi\r\n peaceofAngle=2*angleMax/(nbRay-1)\r\n #to trace rays at regular intervals between [-angleMax;angleMax] \r\n\r\n tot=0 #to count the number of peace of ray\r\n indice=0 #to browse raysIndex\r\n\r\n raysMatrix=np.empty(shape=(0,5),dtype=np.float64)#will contain all the rays in a row\r\n raysIndex=np.empty(shape=(nbRay,),dtype=np.int16)#indexation of the rays in raysMatrix\r\n \r\n for i in np.arange(mini,maxi+1,1):#put maxi+1 to include maxi in the loop\r\n \r\n rayon=Rayon(source.position,angleToVector(peaceofAngle*i))#rayon is\r\n #the ray we will trace\r\n ray,compt=traceRay(rayon)\r\n tot+=(compt+1)\r\n\r\n \r\n raysIndex[indice]=tot #the rays index contains the indice just above\r\n #of the end of the i th ray\r\n\r\n raysMatrix=np.vstack((raysMatrix,ray))\r\n #the form of the ray matrix is a stack of peace of rays describe by\r\n #a,b,c,x1,reflexion. the polynome of the peace of ray being ax^2+bx+c and the\r\n #abscisses of the limiting point being x1, reflexion indicating if a reflexion happened\r\n #when we meet a 5-uple with a coefficient b or c infinite it means\r\n #a new ray begin\r\n \r\n indice+=1\r\n print(\"ray at indice\",i,\"and at angle\",peaceofAngle*i/np.pi*180,'degree(s)')\r\n \r\n print(\"the total number of peaces of ray is :\", tot)\r\n\r\n return(raysMatrix,raysIndex)", "def _walk(self):\n \n newpos= self.rect.move((self.move, 0)) # x方向移動 .move, y方向不動。\n \n # 偵測碰撞左右牆壁,並處理(反彈)\n if not self.area.contains(newpos):\n if self.rect.left < self.area.left or \\\n self.rect.right > self.area.right:\n self.move = -self.move\n newpos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(self.image, 1, 0)\n self.rect = newpos", "def rollout(leaf, depth):\n if depth <= 0:\n return 0\n\n total_reward = 0\n prev_state = leaf\n\n for i in range(depth):\n cur_state = prev_state.copy()\n agent_actions = rollout_policy(cur_state)\n agents_obs, _, done, _ = cur_state.game_env.step(agent_actions)\n\n # After making a move, update the memory kept on this node\n cur_state.agent_memory = utility.update_agent_memory(cur_state.agent_memory,\n agents_obs[cur_state.agent_id])\n\n reward = decide_reward(prev_state, cur_state)\n total_reward += reward\n\n prev_state = cur_state\n\n if done:\n break\n\n return total_reward", "def moveFunction(target, rays):\r\n for ray in rays:\r\n ray.hitTarget(target)", "def moveStep(self):\n\t\tif self.pos[0] <= self.boundsX[0] or \\\n\t\t(self.pos[0]+ 2*(self.radius)) >= self.boundsX[1]:\n\t\t\tself.dir[0] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def move_draught_end(event):\n global red_draughts, white_draughts\n global board_array\n global old_point\n global die_1_num, die_2_num, doubles\n draught = board.find_withtag(CURRENT)[0]\n #Figure out which point they want to put it on\n bottom = (event.y-click_offset[1] >= board_height//2)\n point_left_edges = [board_divisions*i for i in xrange(0,15) if i != 7]\n is_red = draught in red_draughts\n if bottom == 
False:\n new_point = 12+point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))\n else:\n new_point = 13-point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0]))) \n #Check legality\n if(board_array[new_point][1] > 1 and is_red) or (board_array[new_point][0] > 1 and not is_red): #if too many opposite color on square\n draw_draughts()\n return\n if(board_array[0][0] > 0 and is_red and old_point != 0)or(board_array[25][1] > 0 and not is_red and old_point != 25):#Obligated to move off bar first\n draw_draughts()\n return\n if(new_point == 0 and not is_red): #if white trying to bear off\n for i in xrange(7,26):\n if(board_array[i][1] > 0): #If white has a piece outside home, can't bear off\n draw_draughts()\n return\n if(new_point == 25 and is_red): #if red trying to bear off\n for i in xrange(0,18):\n if(board_array[i][0] > 0): #If red has a piece outside home, can't bear off\n draw_draughts()\n return \n \n if(new_point-old_point == die_1_num and is_red) or (old_point-new_point == die_1_num and not is_red):\n if(doubles == False) or (die_2_num != 0):\n die_1_num = 0\n else: \n die_2_num = die_1_num\n doubles = False\n elif(new_point-old_point == die_2_num and is_red) or (old_point-new_point == die_2_num and not is_red):\n if(doubles == False) or (die_1_num != 0):\n die_2_num = 0\n else: \n die_1_num = die_2_num\n doubles = False\n else: #Can't move there on this roll\n draw_draughts()\n return\n update_dice()\n #Update board_array\n if is_red:\n board_array[old_point][0] -= 1\n board_array[new_point][0] += 1\n if(board_array[new_point][1] == 1): #Handle hits\n board_array[new_point][1] -= 1\n board_array[25][1] += 1\n else:\n board_array[old_point][1] -= 1\n board_array[new_point][1] += 1\n if(board_array[new_point][0] == 1): #Handle hits\n board_array[new_point][0] -= 1\n board_array[0][0] += 1\n\n draw_draughts()\n if(die_1_num == 0 and die_2_num == 0):\n comp_turn()", "def step(self, action):\n # print(action)\n distances = self.agent.return_distances(self.agent.corners, self.agent.line_pos)\n\n left = distances[0]\n right = distances[1]\n self.agent.distances.append({\n 'left': left,\n 'right': right\n })\n reward = 0\n if action == 1:\n self.agent.angle -= 90\n if self.agent.angle < 0:\n self.agent.angle = 0\n self.agent.direction_history.append('left')\n self.reset_raycasts(self.agent.angle)\n self.render()\n if left > right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 2:\n self.agent.angle += 90\n if self.agent.angle >= 360:\n self.agent.angle = 0\n\n self.reset_raycasts(self.agent.angle)\n self.render()\n self.agent.direction_history.append('right')\n if left < right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 0:\n self.agent.direction_history.append('forward')\n if self.agent.angle >= 360: self.agent.angle == 0\n if self.agent.angle == 0 or self.agent.angle == 360:\n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n \n if left + right >= 50:\n reward += 5\n\n self.render()\n\n elif action == 3:\n self.agent.direction_history.append('reverse')\n if self.agent.angle == 0:\n self.agent.agent_position['y'] += 10\n 
self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n \n if left + right <= 50:\n reward += 5\n\n \n else:\n reward -= 5\n\n if \"forward\" not in self.agent.direction_history[len(self.agent.direction_history)-6:len(self.agent.direction_history)-1]:\n reward -= 10\n\n \n info = {}\n if self.agent.check_collision():\n reward -= 10\n self.reset() \n self.agent.rewards.append({\n 'leftDistance': left,\n 'rightDistance': right,\n 'reward': reward,\n })\n self.render()\n print(f\"REWARD: {reward}\")\n # self.render()\n # print(self.agent.direction_history[-1])\n self.agent.rewards.append(reward)\n return np.array([left, right]), reward, False, info", "def move_agent(self, state):\n m = self.m\n n = self.n\n\n cur_env = deepcopy(state.grid)\n cur_env[m, n] = 0\n action = self.choose_action(state)\n\n if action == 'Right':\n if n + 1 >= grid_size or cur_env[m][n+1] != 0:\n Rew = -2 # Reward -5 if we move into wall or another agent\n self.collisions += 1\n else:\n n += 1\n Rew = -0.1 # Reward -1 otherwise\n a = 0 # Action number\n elif action == 'Left':\n if n - 1 < 0 or cur_env[m][n-1] != 0:\n Rew = -2\n self.collisions += 1\n else:\n n -= 1\n Rew = -0.1\n a = 1\n elif action == 'Up':\n if m - 1 < 0 or cur_env[m-1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m -= 1\n Rew = -0.1\n a = 2\n elif action == 'Down':\n if m + 1 >= grid_size or cur_env[m+1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m += 1\n Rew = -0.1\n a = 3\n\n m = m % grid_size\n n = n % grid_size\n self.m = m # Update position of agent\n self.n = n # Update position of agent\n cur_env[m][n] = 1 # Update grid\n new_state = State(cur_env, [m, n]) # Set new state\n terminal = False\n\n if [m, n] == self.end:\n Rew = 10\n terminal = True\n self.carry = True\n\n return new_state, a, Rew, terminal", "def Enmove(self):\r\n if self.vel > 0:\r\n if self.rect.x + self.vel < self.path[1]:\r\n self.rect.x += self.vel #Moving enemy towards end of path\r\n else:\r\n if self.flipped: #flip enemy and move along opposite direction\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n self.flipped = False\r\n self.vel = -self.vel\r\n else:\r\n if self.rect.x - self.vel > self.path[0]:\r\n self.rect.x += self.vel #Moving enemy back to starting point\r\n else:\r\n if not self.flipped: #determining whether image should be flipped\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n self.flipped = True \r\n self.vel = -self.vel", "def ray(self):\n return self._ray", "def draw_ray(env, ray, dist=0.03, linewidth=2, color=None):\n if dist < 0:\n newpos = ray.pos() + dist*ray.dir()\n newray = orpy.Ray(newpos, ray.dir())\n else:\n newray = ray\n iktype = orpy.IkParameterizationType.TranslationDirection5D\n ikparam = orpy.IkParameterization(ray, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, dist=dist, linewidth=linewidth,\n coloradd=color)\n return h", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for 
i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def move_down():\n return __maze.move_down()", "def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return", "def _move(self, d, event):\n\n actor = None\n if event.source in self._crates:\n actor = self._crates[event.source]\n else:\n actor = self._clones[event.source][0]\n\n if d == Direction.NO_ACT or not event.success:\n actor.animation = actor.do_nothing_animation()\n return\n pos = actor.pos\n target = pos.dir_pos(d)\n actor.direction = d\n actor.animation = actor.walk_animation()\n self.repaint()", "def move_step(self, direction):\n x = self.objects[0].x\n y = self.objects[0].y\n if direction == 0 and y >= 1:\n self.objects[0].y -= 1\n elif direction == 1 and y <= self.size_y - 2:\n self.objects[0].y += 1\n elif direction 
== 2 and x >= 1:\n self.objects[0].x -= 1\n elif direction == 3 and x <= self.size_x - 2:\n self.objects[0].x += 1", "def search_my_move(self, env: ChessEnv, is_root_node=False) -> float:\n\t\tif env.done:\n\t\t\tif env.winner == Winner.draw:\n\t\t\t\treturn 0\n\t\t\t# assert env.whitewon != env.white_to_move # side to move can't be winner!\n\t\t\treturn -1\n\n\t\tstate = state_key(env)\n\n\t\twith self.node_lock[state]:\n\t\t\tif state not in self.tree:\n\t\t\t\tleaf_p, leaf_v = self.expand_and_evaluate(env)\n\t\t\t\tself.tree[state].p = leaf_p\n\t\t\t\treturn leaf_v # I'm returning everything from the POV of side to move\n\t\t\t#assert state in self.tree\n\n\t\t\t# SELECT STEP\n\t\t\taction_t = self.select_action_q_and_u(env, is_root_node)\n\n\t\t\tvirtual_loss = self.play_config.virtual_loss\n\n\t\t\tmy_visit_stats = self.tree[state]\n\t\t\tmy_stats = my_visit_stats.a[action_t]\n\n\t\t\tmy_visit_stats.sum_n += virtual_loss\n\t\t\tmy_stats.n += virtual_loss\n\t\t\tmy_stats.w += -virtual_loss\n\t\t\tmy_stats.q = my_stats.w / my_stats.n\n\n\t\tenv.step(action_t.uci())\n\t\tleaf_v = self.search_my_move(env) # next move from enemy POV\n\t\tleaf_v = -leaf_v\n\n\t\t# BACKUP STEP\n\t\t# on returning search path\n\t\t# update: N, W, Q\n\t\twith self.node_lock[state]:\n\t\t\tmy_visit_stats.sum_n += -virtual_loss + 1\n\t\t\tmy_stats.n += -virtual_loss + 1\n\t\t\tmy_stats.w += virtual_loss + leaf_v\n\t\t\tmy_stats.q = my_stats.w / my_stats.n\n\n\t\treturn leaf_v", "def ray_status(ray, points, nodes):\n container = find_container(ray, nodes)\n \n # Handle special case of last step where ray is hitting the world node\n root = nodes[0].root\n if container == root and len(nodes) == 1:\n status = root, None, root\n return status\n\n if nodes[0] == container:\n surface_node = nodes[0]\n to_node = nodes[1]\n else:\n surface_node = nodes[0]\n to_node = nodes[0]\n status = container, to_node, surface_node\n return status", "def reflect(self, ray):\n normal = self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n return Ray(\n ray.direction - 2 * dot(ray.direction, normal) * normal, ray.position)", "def traceRay2XY(IKLE,MESHX,MESHY,neighbours,ei,xyi,en,xyn):\n # ~~> latest addition to the ray\n ax,bx,cx = MESHX[IKLE[en]]\n ay,by,cy = MESHY[IKLE[en]]\n bi = getBarycentricWeights( xyi,(ax,ay),(bx,by),(cx,cy) )\n pnt = {'n':1, 'xy':[xyi], 'e':[en], 'b':[bi],\n 'd':[np.power(xyi[0]-xyn[0],2) + np.power(xyi[1]-xyn[1],2)]}\n\n # ~~> convergence on distance to target xyn\n accuracy = np.power(10.0, -5+np.floor(np.log10(abs(ax+bx+cx+ay+by+cy))))\n if pnt['d'][0] < accuracy: return True,pnt\n\n # ~~> get the ray through to the farthest neighbouring edges\n ks = []; ds = []\n for k in [0,1,2]:\n xyj = getSegmentIntersection( (MESHX[IKLE[en][k]],MESHY[IKLE[en][k]]),(MESHX[IKLE[en][(k+1)%3]],MESHY[IKLE[en][(k+1)%3]]),xyi,xyn )\n if xyj == []: continue # there are no intersection with that edges\n ej = neighbours[en][k]\n if ej == ei: continue # you should not back track on your ray\n xyj = xyj[0]\n dij = np.power(xyi[0]-xyj[0],2) + np.power(xyi[1]-xyj[1],2)\n ks.append(k)\n ds.append(dij)\n if ds != []:\n k = ks[np.argmax(ds)]\n ej = neighbours[en][k]\n xyj = getSegmentIntersection( (MESHX[IKLE[en][k]],MESHY[IKLE[en][k]]),(MESHX[IKLE[en][(k+1)%3]],MESHY[IKLE[en][(k+1)%3]]),xyi,xyn )[0]\n djn = np.power(xyn[0]-xyj[0],2) + np.power(xyn[1]-xyj[1],2)\n\n # ~~> Possible recursive call\n if True or djn > accuracy: # /!\\ this may be a problem\n if ej < 0:\n # you have reach the end of the line\n bj 
= getBarycentricWeights( xyj,(ax,ay),(bx,by),(cx,cy) )\n pnt['n'] += 1; pnt['xy'].insert(0,xyj); pnt['e'].insert(0,en); pnt['b'].insert(0,bj); pnt['d'].insert(0,djn)\n return djn<accuracy,pnt\n else:\n found,ray = traceRay2XY(IKLE,MESHX,MESHY,neighbours,en,xyj,ej,xyn)\n ray['n'] += 1; ray['xy'].append(xyi); ray['e'].append(en); ray['b'].append(bi); ray['d'].append(dij)\n return found,ray\n\n # ~~> convergence on having found the appropriate triangle\n bn = isInsideTriangle( xyn,(ax,ay),(bx,by),(cx,cy) )\n if bn != []:\n pnt['n'] += 1; pnt['xy'].insert(0,xyn); pnt['e'].insert(0,en); pnt['b'].insert(0,bn); pnt['d'].insert(0,0.0)\n return True,pnt\n\n # ~~> you should not be here !\n return False,pnt", "async def _move_radec(self, ra: float, dec: float, abort_event: asyncio.Event) -> None:\n\n # start slewing\n await self.__move(ra, dec, abort_event)", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... \",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def search_my_move(self, env:Chess, is_root_node=False) -> float:\n if env.over():\n if env.victor == Victor.draw:\n return 0\n # assert env.whitewon != env.white_to_move # side to move can't be winner!\n return -1\n\n state = board_state_key(env)\n\n with self.node_lock[state]:\n if state not in self.tree:\n leaf_p, leaf_v = self.expand_and_evaluate(env)\n self.tree[state].p = leaf_p\n return leaf_v # I'm returning everything from the POV of side to move\n\n # SELECT STEP\n action_t = self.select_action_q_and_u(env, is_root_node)\n\n virtual_loss = self.play_conf.virtual_loss\n\n my_visit_stats = self.tree[state]\n my_stats = my_visit_stats.a[action_t]\n\n my_visit_stats.sum_n += virtual_loss\n my_stats.n += virtual_loss\n my_stats.w += -virtual_loss\n my_stats.q = my_stats.w / my_stats.n\n\n env.make_move(action_t.uci())\n leaf_v = self.search_my_move(env) # next move from enemy POV\n leaf_v = -leaf_v\n\n # BACKUP STEP\n # on returning search path\n # update: N, W, Q\n with self.node_lock[state]:\n my_visit_stats.sum_n += -virtual_loss + 1\n my_stats.n += -virtual_loss + 1\n my_stats.w += virtual_loss + leaf_v\n my_stats.q = my_stats.w / my_stats.n\n\n return leaf_v", "def set_originating_ray(self, ray):\n\n self._originating_ray = ray", "def move_right():\n return __maze.move_right()", "def step(self, move):", "def apply_move(move):\n\trects_copy = deepcopy(rects)\n\trect_index = move[0]\n\trect_edge = move[1]\n\toffset = int(sqrt(len(rects)))\n\tif rect_edge == 'l':\n\t\trects_copy[rect_index].left_edge = True\n\t\tif rect_index - offset >= 0:\n\t\t\trects_copy[rect_index - offset].right_edge = True\n\telif rect_edge == 'r':\n\t\trects_copy[rect_index].right_edge = True\n\t\tif rect_index + offset <= 8:\n\t\t\trects_copy[rect_index + offset].left_edge = True\n\telif rect_edge == 't':\n\t\trects_copy[rect_index].top_edge = True\n\t\tif rect_index%offset != 0:\n\t\t\trects_copy[rect_index - 1].bottom_edge = True\n\telif rect_edge == 'b':\n\t\trects_copy[rect_index].bottom_edge = True\n\t\tif rect_index%offset != offset - 1:\n\t\t\trects_copy[rect_index + 1].top_edge = True\n\treturn map(get_filled_edges, rects_copy)", "def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n 
current_depth = self.currentState.depth\n found_move = False\n while self.currentState.parent:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n count = self.currentState.nextChildToVisit\n if len(self.currentState.children) > count:\n found_move = True\n break\n if not found_move:\n for all_visited in self.visited.keys():\n all_visited.nextChildToVisit = 0\n current_depth += 1\n if len(self.visited) == 1:\n all_possible_moves = self.gm.getMovables()\n for every_move in all_possible_moves:\n self.gm.makeMove(every_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, every_move)\n new_game_state.parent = self.currentState\n self.visited[new_game_state] = False\n self.currentState.children.append(new_game_state)\n self.gm.reverseMove(every_move)\n while current_depth != self.currentState.depth:\n count = self.currentState.nextChildToVisit\n self.currentState.nextChildToVisit += 1\n if len(self.currentState.children) > count:\n self.currentState = self.currentState.children[count]\n next_move = self.currentState.requiredMovable\n self.gm.makeMove(next_move)\n else:\n found_move = False\n while self.currentState.parent:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n if len(self.currentState.children) > self.currentState.nextChildToVisit:\n found_move = True\n break\n if not found_move:\n return False\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n all_possible_moves = self.gm.getMovables()\n next_depth = current_depth + 1\n for every_move in all_possible_moves:\n self.gm.makeMove(every_move)\n new_game_state = GameState(self.gm.getGameState(), next_depth, every_move)\n if new_game_state not in self.visited:\n self.visited[new_game_state] = False\n new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.gm.reverseMove(every_move)\n return False\n else:\n return True", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n while to_move < len(movables):\n # Make the move\n movable_statement = movables[to_move]\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n\n # Find out if this state has already been explored\n visited = False\n for visited_state in self.visited.keys():\n if visited_state.state == new_state:\n visited = True\n\n # If the new state hasn't been visited then add it as a child then move down to this child\n if not visited:\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.currentState = new_gs\n break\n\n # Else skip this state and try going to the next movable statement\n else:\n # print(\"SKIP THIS STATE\")\n self.gm.reverseMove(movable_statement)\n to_move += 1\n\n # Went all the way 
down to a leaf, backtrack\n if (to_move >= len(movables)):\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n return False", "def move_to_refine(self, des_img_pos, act_img_pos, current_world_pos, increment, img_thresh):\n des_img_x = des_img_pos[0]\n des_img_y = des_img_pos[1]\n act_img_x = act_img_pos[0]\n act_img_y = act_img_pos[1]\n cur_wld_x = current_world_pos[0]\n cur_wld_y = current_world_pos[1]\n new_wld_x = cur_wld_x\n new_wld_y = cur_wld_y\n \n #object to the left -> move left (-wld_y)\n if (act_img_x < des_img_x-img_thresh):\n print(' Moving left')\n new_wld_y = cur_wld_y + increment\n #object to the right -> move right (+wld_y)\n elif (act_img_x > des_img_x+img_thresh):\n new_wld_y = cur_wld_y - increment\n print(' Moving right')\n #object to the top -> move forward (+wld_x)\n if (act_img_y < des_img_y-img_thresh):\n new_wld_x = cur_wld_x + increment\n print(' Moving forward')\n #object to the bottom -> move backward (-wld_x)\n elif (act_img_y > des_img_y+img_thresh):\n new_wld_x = cur_wld_x - increment\n print(' Moving backward')\n \n #move arm to new coordinates\n self.move_to(new_wld_x, new_wld_y, self.move_to_height)\n \n #return new arm position\n return [new_wld_x, new_wld_y]", "def moveStep(self):\n\t\tif self.pos[0] < self.boundsX[0] or \\\n\t\t\tself.pos[0] > (self.boundsX[1] - self.width):\n\t\t\t\tself.dir[0] *= -1\n\t\tif self.pos[1] < self.boundsY[0] or \\\n\t\t self.pos[1] > (self.boundsY[1] - self.height):\n\t\t\t\tself.dir[1] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def move(): #py:move\n RUR._move_()", "def move(self, dx=0, dy=0):\n\n # Test if we enter the actionable zone of an entity\n # Note: this can be a door to open, or a fight!\n for entity in GLOBAL.game.current_region.region_entities:\n if entity != self and hasattr(entity, \"actionable\") and entity.actionable is not None and \\\n (self.x + dx, self.y + dy) in entity.actionable.action_field:\n self.x += dx\n self.y += dy\n ok_to_move = entity.actionable.action(self)\n self.x -= dx\n self.y -= dy\n if ok_to_move is not None and not ok_to_move:\n # We triggered an object, and it prevented the move (like a door not opening)\n return False\n\n if entity != self and hasattr(entity, \"fighter\") and entity.fighter is not None and \\\n (self.x + dx, self.y + dy) in entity.fighter.action_field:\n self.x += dx\n self.y += dy\n ok_to_move = entity.fighter.action(self)\n self.x -= dx\n self.y -= dy\n if ok_to_move is not None and not ok_to_move:\n # We came in a fight...\n return False\n\n # Test if we collide with the terrain, and terrain only\n destination_tile = GLOBAL.game.current_region.tiles[self.x + dx][self.y + dy]\n if not destination_tile.block_for(self):\n # now test the list of objects\n for entity in GLOBAL.game.current_region.region_entities:\n if entity != self and entity.blocks and entity.x == self.x + dx and entity.y == self.y + dy:\n return False\n # success\n self.x += dx\n self.y += dy\n if self.animated and (dx != 0 or dy != 0):\n self.last_direction = (dx, dy)\n\n GLOBAL.game.invalidate_fog_of_war = True\n # self.game.ticker.ticks_to_advance += self.speed_cost_for(c.AC_ENV_MOVE)\n return True\n\n return False", "def cutDownAngle_gk(state, raySortie):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = raySortie\n position += diff\n return goTo(state,position)", "def step(self, action):\n\n previous_state = self.state\n self._make_action(action) \n 
self.step_simulation()\n self._make_observation() # Update state\n \n ###################\n ### Reward function\n\n body_position = self.agent.get_position('Torso') # x,y,z coordinates of the agent\n r_foot_collision, l_foot_collision = self.state[-2:] # Feet collision indicators [0/1]\n roll, pitch = self.state[12:14] # Roll and pitch of the agent's convex hull\n\n # Staying upright\n posture = 0\n if abs(roll) > abs(previous_state[12]):\n posture -= .1\n else:\n posture += .125\n\n if abs(pitch) > abs(previous_state[13]):\n posture -= .1\n else:\n posture += .125\n \n hull = 0\n if abs(roll) < .125 and abs(pitch) < .125:\n posture += .1\n # Lifting feet while upright\n # collisions = np.count_nonzero(self.state[14::])\n # posture = (2 - collisions) * .\n\n # Hull location\n progress = body_position[0] - self.previous_body_position\n if progress > 0: \n hull = 0.1 + progress * 40\n if hull > .5: hull = .5\n else:\n hull = -0.1 + progress * 40\n if hull < -.5: hull = -.5\n self.previous_body_position = body_position[0]\n\n \"\"\"\n STATE SPACE:\n include:\n 1. Angular velocity of the torso (also normal velocity?) both can be obtained through gyro and accelerometer\n 2. Change to orientation of the torso instead of convex hull\n 3. \n \"\"\"\n\n # Feet distance\n # Use multiplicative reward?\n # Change in feet position along the X axis\n # pos_lfoot = self.agent.get_position('LFoot')[0]\n # pos_rfoot = self.agent.get_position('RFoot')[0]\n # distance_lfoot = (pos_lfoot - self.previous_feet_position[0])\n # distance_rfoot = (pos_rfoot - self.previous_feet_position[1])\n # if self.previous_feet_position[0] != 0:\n # feet_distance = (distance_lfoot + distance_rfoot) * 100\n # else:\n # feet_distance = 0\n\n # self.previous_feet_position = [pos_lfoot, pos_rfoot]\n\n base = 0.05\n reward = base + posture + hull\n # print('hull: {}'.format(hull))\n # print('posture: {}'.cformat(posture))\n\n # End condition\n if (abs(roll) > self.fall_threshold or abs(pitch) > self.fall_threshold):\n reward -= 2\n self.done = True \n\n # print('Posture: {} \\n Hull: {}'.format(posture, hull))\n # print('Total reward: {}'.format(reward))\n\n return self.state, reward, self.done, {}", "def move_draught(event):\n global red_turn\n if(red_turn == False):\n return\n draught = board.find_withtag(CURRENT)[0]\n board.coords(draught,event.x-click_offset[0],event.y-click_offset[1],event.x-click_offset[0]+board_divisions,event.y-click_offset[1]+board_divisions)", "def update(self):\n # Move left/right=====\n self.rect.x += self.change_x\n self.rect.y += self.change_y\n visited[int(self.rect.x/32)][int(self.rect.y/32)].append(self.id)\n\n self.path.append((int(self.rect.x/32), int(self.rect.y/32)))\n\n # if(self.rect.x == goal_x) & (self.rect.y == goal_y):\n # pygame.quit()\n # sys.exit(0)\n\n self.change_x = 0\n self.change_y = 0", "def move_waters (pdb_hierarchy, xray_structure, out) :\n if (len(pdb_hierarchy.models()) > 1) :\n raise Sorry(\"Rearranging water molecules is not supported for \"+\n \"multi-MODEL structures.\")\n sel_cache = pdb_hierarchy.atom_selection_cache()\n water_sel = sel_cache.selection(\"resname HOH or resname WAT\")\n n_waters = water_sel.count(True)\n if (n_waters == 0) :\n print >> out, \"No waters found, skipping\"\n return pdb_hierarchy, xray_structure\n else :\n print >> out, \"%d atoms will be moved.\" % n_waters\n hierarchy_water = pdb_hierarchy.select(water_sel)\n hierarchy_non_water = pdb_hierarchy.select(~water_sel)\n xrs_water = xray_structure.select(water_sel)\n xrs_non_water = 
xray_structure.select(~water_sel)\n for chain in hierarchy_water.only_model().chains() :\n hierarchy_non_water.only_model().append_chain(chain.detached_copy())\n xrs_non_water.add_scatterers(xrs_water.scatterers())\n assert len(xrs_non_water.scatterers()) == \\\n len(hierarchy_non_water.atoms()) == len(pdb_hierarchy.atoms())\n for atom, scat in zip(hierarchy_non_water.atoms(),\n xrs_non_water.scatterers()) :\n assert (atom.id_str() == scat.label)\n return hierarchy_non_water, xrs_non_water", "def ray_trace(self, max_iterations=25):\n if not bool(self.optical_system):\n return\n \n self.clear_ray_history() \n starting_rays = self.optical_system._amalgamated_sources.copy()\n for i in range(max_iterations):\n result = self.single_pass(starting_rays)\n \n if bool(result):\n starting_rays = result\n else:\n break", "def update_return(self):\n if abs(self.eaten_time - time.get_ticks()) > self.return_delay:\n self.image, _ = self.eyes.get_image(key=self.direction)\n test = self.check_path_tile()\n if test == '*':\n self.state['return'] = False\n self.direction = self.get_chase_direction(self.get_direction_options())\n else:\n self.direction = self.get_dir_from_path()\n if self.direction == 'u':\n self.rect.centery -= self.speed\n elif self.direction == 'l':\n self.rect.centerx -= self.speed\n elif self.direction == 'd':\n self.rect.centery += self.speed\n elif self.direction == 'r':\n self.rect.centerx += self.speed", "def _trace_ray(self, ray, depth=0, max_depth=5):\n\n color = Color()\n\n if depth >= max_depth:\n return color\n\n intersection = self._get_intersection(ray)\n if intersection is None:\n return color\n\n obj, dist = intersection\n intersection_pt = ray.point_at_dist(dist)\n surface_norm = obj.surface_norm(intersection_pt)\n\n # ambient light\n # color += obj.material.color * obj.material.ambient\n\n point_on_plane = ray.origin + dist*ray.direction\n imgx = point_on_plane.x\n imgy = np.sqrt(point_on_plane.y*point_on_plane.y + point_on_plane.z*point_on_plane.z)\n\n\n '''\n # Nearest Texel\n int_imgx = int(round(imgx))\n int_imgy = int(round(imgy))\n if int_imgx == 512:\n int_imgx = 511\n if int_imgy == 512:\n int_imgy = 511\n color += Color(img[int_imgx, int_imgy, 0], img[int_imgx, int_imgy, 1], img[int_imgx, int_imgy, 2])\n '''\n\n\n # Bilinearly Interpolated Texel\n ceilx = int(math.ceil(imgx))\n ceily = int(math.ceil(imgy))\n floorx = int(math.floor(imgx))\n floory = int(math.floor(imgy))\n if ceilx >= 512:\n ceilx = 511\n if ceily >= 512:\n ceily = 511\n if floorx >= 512:\n floorx = 511\n if floory >= 512:\n floory = 511\n interpolate_x1 = (ceilx - imgx) * (img[ceilx, ceily]) + (imgx - floorx) * (img[floorx, ceily])\n interpolate_x2 = (ceilx - imgx) * (img[ceilx, floory]) + (imgx - floorx) * (img[floorx, floory])\n interpolate_y = (ceily - imgy) * interpolate_x1 + (imgy - floory) * interpolate_x2\n color += Color(interpolate_y[0], interpolate_y[1], interpolate_y[2])\n # print color\n\n\n '''\n # lambert shading\n for light in self.lights:\n pt_to_light_vec = (light - intersection_pt).normalize()\n pt_to_light_ray = Ray(intersection_pt, pt_to_light_vec)\n if self._get_intersection(pt_to_light_ray) is None:\n lambert_intensity = surface_norm * pt_to_light_vec\n if lambert_intensity > 0:\n color += obj.material.color * obj.material.lambert * \\\n lambert_intensity\n\n \n # specular (reflective) light\n reflected_ray = Ray(\n intersection_pt, ray.direction.reflect(surface_norm).normalize())\n color += self._trace_ray(reflected_ray, depth + 1) * \\\n obj.material.specular\n '''\n 
return color", "def get_action_for_move(\n agent_position: Tuple[int, int],\n agent_direction: Grid4TransitionsEnum,\n next_agent_position: Tuple[int, int],\n next_agent_direction: int,\n rail: GridTransitionMap) -> Optional[RailEnvActions]:\n possible_transitions = rail.get_transitions(*agent_position, agent_direction)\n num_transitions = np.count_nonzero(possible_transitions)\n # Start from the current orientation, and see which transitions are available;\n # organize them as [left, forward, right], relative to the current orientation\n # If only one transition is possible, the forward branch is aligned with it.\n if rail.is_dead_end(agent_position):\n valid_action = RailEnvActions.MOVE_FORWARD\n new_direction = (agent_direction + 2) % 4\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif num_transitions == 1:\n valid_action = RailEnvActions.MOVE_FORWARD\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n else:\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n if new_direction == agent_direction:\n valid_action = RailEnvActions.MOVE_FORWARD\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif new_direction == (agent_direction + 1) % 4:\n valid_action = RailEnvActions.MOVE_RIGHT\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif new_direction == (agent_direction - 1) % 4:\n valid_action = RailEnvActions.MOVE_LEFT\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n self.visited_states.append(self.currentState.state)\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n # If current state has no children, make children\n if not self.currentState.children:\n for movable_statement in movables:\n # Make the move\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n # print (\"new state \", new_state)\n\n # If the new state hasn't been visited and isn't in the queue then add it as a child and to the queue\n if (new_state not in self.visited_states):\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n 
self.visited[new_gs] = True\n self.visited_states.append(new_state)\n self.gs_queue.append(new_gs)\n\n self.gm.reverseMove(movable_statement)\n\n # Return false if no more to explore\n if not self.gs_queue:\n return False\n\n # Revert to state at when current and next start to change\n root_curr = self.currentState\n self.currentState = self.gs_queue.popleft()\n root_new = self.currentState\n\n # Backtrack to when current node and new node start to diverge\n if root_new.depth == root_curr.depth:\n while root_curr.state != root_new.state:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n root_new = root_new.parent\n else:\n while root_curr.requiredMovable:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n\n # Return game master to state that we are exploring\n # Find path between root and current state\n path = []\n currNode = self.currentState\n while currNode != root_curr:\n path.append(currNode.requiredMovable)\n currNode = currNode.parent\n\n # Created backwards path, now make moves from root to current state\n path.reverse()\n for movable_statement in path:\n self.gm.makeMove(movable_statement)\n\n return False", "def main_ray_cast(self, context, event):\r\n # get the context arguments\r\n MPM = bpy.context.window_manager.MPM\r\n scene = context.scene\r\n region = context.region\r\n rv3d = context.region_data\r\n coord = event.mouse_region_x, event.mouse_region_y\r\n \r\n # get the ray from the viewport and mouse\r\n view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)\r\n ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)\r\n \r\n ray_target = ray_origin + view_vector\r\n \r\n def visible_objects_and_duplis():\r\n \"\"\"Loop over (object, matrix) pairs (mesh only)\"\"\"\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()\r\n \r\n def obj_ray_cast(obj, matrix):\r\n \"\"\"Wrapper for ray casting that moves the ray into object space\"\"\"\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None\r\n \r\n # cast rays and find the closest object\r\n best_length_squared = -1.0\r\n best_obj = None\r\n \r\n for obj, matrix in visible_objects_and_duplis():\r\n if obj.type == 'MESH':\r\n hit, normal, face_index = obj_ray_cast(obj, matrix)\r\n if hit is not None:\r\n hit_world = matrix * hit\r\n length_squared = (hit_world - ray_origin).length_squared\r\n if best_obj is None or length_squared < best_length_squared:\r\n best_length_squared = length_squared\r\n best_obj = obj\r\n \r\n if best_obj is not None:\r\n if self.on_curve:\r\n if best_obj != bpy.context.active_object:\r\n if self.choose_start:\r\n bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap = None if bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap == best_obj else best_obj\r\n self.choose_start = False\r\n \r\n if self.choose_end:\r\n 
bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap = None if bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap == best_obj else best_obj\r\n self.choose_end = False\r\n \r\n if self.choose_profile:\r\n \r\n curve = bpy.context.active_object.modifiers[\"Curve\"].object\r\n start = bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap if bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap else \"\"\r\n end = bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap if bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap else \"\"\r\n \r\n bpy.ops.object.modifier_remove(modifier = \"Array_on_curve\")\r\n bpy.ops.object.modifier_remove(modifier = \"Curve\")\r\n bpy.context.active_object.select = False\r\n best_obj.select = True\r\n bpy.context.scene.objects.active = best_obj\r\n best_obj.modifiers.new(\"Array_on_curve\", 'ARRAY')\r\n MPM.array_name = \"Array_on_curve\"\r\n best_obj.modifiers[\"Array_on_curve\"].relative_offset_displace[self.axis_value] = 1\r\n for i in range(3):\r\n if i != self.axis_value:\r\n best_obj.modifiers[\"Array_on_curve\"].relative_offset_displace[i]=0\r\n best_obj.modifiers[\"Array_on_curve\"].fit_type = 'FIT_CURVE'\r\n best_obj.modifiers[\"Array_on_curve\"].curve = curve\r\n best_obj.modifiers[\"Array_on_curve\"].use_merge_vertices = True\r\n if start:\r\n best_obj.modifiers[\"Array_on_curve\"].start_cap = start if start != best_obj else None\r\n \r\n if end:\r\n best_obj.modifiers[\"Array_on_curve\"].end_cap = end if end != best_obj else None\r\n \r\n # setup the curve modifier\r\n best_obj.modifiers.new(\"Curve\", 'CURVE')\r\n best_obj.modifiers[\"Curve\"].object = curve\r\n self.setup_deform_axis(best_obj.modifiers, self.axis_value)\r\n \r\n self.choose_profile = False", "def action(self):\r\n\r\n\r\n #have we just started?\r\n if self.player_information[\"us\"][\"nTokens\"] == 0:\r\n move = generate_starting_move(self.player_information[\"us\"][\"player_side\"], self.board_array)\r\n return move\r\n\r\n #otherwise do minimax \r\n \r\n #start off with some shallow depth:\r\n if self.turn_no < 5:\r\n depth = 3\r\n else:\r\n depth = 2\r\n \r\n #set a constraint for search depth\r\n if self.total_tokens_on_board < 6:\r\n depth = 3\r\n elif self.total_tokens_on_board < 10:\r\n depth = 2\r\n else:\r\n depth = 1\r\n \r\n #have a time reference\r\n print(f'nthrows: {self.player_information[\"us\"][\"nThrowsRemaining\"]}')\r\n starting_time = int(round(time.time(), 0))\r\n #salvage result from minimax\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, depth, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, True, self.turn_no)\r\n\r\n #clean it up a bit \r\n print(self.board_dict)\r\n #tidy it up\r\n result = result[0]\r\n print(f'pre: {result}')\r\n #in case we get a bad move redo but make it very shallow\r\n if len(result) == 1 or result == (-5, -5):\r\n #force it to return a usable move\r\n counter = 0\r\n while (len(result) == 1) or (result == (-5, -5)):\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, 1, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, False, self.turn_no)\r\n result = result[0]\r\n counter += 1\r\n \r\n #if its taking too long\r\n if counter > 2: \r\n #generate one random 
possible move to use \r\n allied_tokens = [token for token in self.player_tokens if self.player_tokens[token] == \"us\"]\r\n move_list = generate_moves(self.board_dict, self.player_tokens, self.co_existance_dict, allied_tokens,\r\n self.player_information, self.board_array, True, \"all\")\r\n \r\n \r\n #if there are no moves\r\n if len(move_list) == 0:\r\n if self.player_information['us']['nThrowsRemaining'] > 0:\r\n throws = generate_possible_throws(self.board_dict, self.player_tokens, self.co_existance_dict, self.player_information, \"us\",\r\n self.player_information[\"us\"][\"player_side\"], self.board_array, \"all\" )\r\n result = random.choice(throws)\r\n \r\n else:\r\n result = random.choice(move_list)\r\n print(f'random: {result}')\r\n break\r\n\r\n print(f' inside: {result}')\r\n\r\n print(result)\r\n #otherwise clean it up\r\n if result[0] == 'throw':\r\n final_result = (result[0].upper(), result[1], result[2])\r\n else:\r\n final_result = (result[0].upper(), result[2], result[3])\r\n # return final result \r\n return final_result", "def go_to_exit(self):\n ys = [self.currY]\n xs = [self.currX]\n options = np.zeros((self.h, self.w), np.uint8)\n visited = np.zeros((self.h, self.w), np.bool_)\n visited[self.currY, self.currX] = True\n distance = 1\n while True:\n while len(ys) > 0:\n cur = (ys.pop(), xs.pop())\n for d, m in enumerate(self.__get_map_offsets()):\n if (m[cur[0], cur[1]] > 1) and (\n not visited[cur[0] + self.directions[d][0], cur[1] + self.directions[d][1]]):\n options[cur[0] + self.directions[d][0], cur[1] + self.directions[d][1]] = distance\n visited[cur[0] + self.directions[d][0], cur[1] + self.directions[d][1]] = True\n if (cur[0] + self.directions[d][0] == self.exitY) and (\n cur[1] + self.directions[d][1] == self.exitX):\n return self.__convert_to_path_exit(options)\n yTemp, xTemp = np.where(options == distance)\n ys += yTemp.tolist()\n xs += xTemp.tolist()\n distance += 1", "def cutDownAngle(state, raySortie, rayInter):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = max(raySortie, diff.norm - rayInter)\n position += diff\n return goTo(state,position)", "def trace_path(ray, container_node, distance):\n if distance < 2*EPS_ZERO:\n # This is a very small step size. 
It could occur naturally, but it is much\n # more likely to be a bug\n raise TraceError(\"Distance is on the order of trace epsilon.\")\n\n # Trace the ray through the material\n local_ray = ray.representation(\n container_node.root, container_node\n )\n for (local_ray, decision) in container_node.geometry.material.trace_path(\n local_ray, container_node.geometry, distance):\n new_ray = local_ray.representation(\n container_node, container_node.root\n )\n yield new_ray, decision", "def propagate(self, time):\n return Ray(self.direction, self.position + time * self.direction)", "def get_reward(self):\n #original reward function: reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum()\n thrusts = self.sim.get_propeler_thrust(self.sim.prop_wind_speed)\n linear_forces = self.sim.get_linear_forces(thrusts)\n distance = np.linalg.norm(self.target_pos - self.sim.pose[:3])\n #speed = math.sqrt(np.square(self.sim.find_body_velocity()).sum())\n #with 300x300x300m env, the max distance from one corner to another is 519\n max_distance = 519\n #Focus quadcopter on not crashing but first rewarding an upward linear force until at the height of the target\n if self.sim.pose[2] < self.target_pos[2]:\n #velocity_discount = 1/speed\n reward = np.tanh(linear_forces[2])\n #after getting to the correct z-coordinate, move to the correct y-coordinate\n elif self.sim.pose[1] < self.target_pos[1]:\n #velocity_discount = 1/speed\n reward = 1 + np.tanh(linear_forces[1])\n #finally, after getting rewards for the x and y coordinates, give reward for distance\n #at this stage, the drone will have overshot the x and y coordinates, but it would be in a better area to\n #start searching for the x coordinate\n elif distance > 1 and self.sim.pose[2] > self.target_pos[2] and self.sim.pose[1] > self.target_pos[1] :\n reward = 2 + (1-math.pow((distance/300),.04))\n elif distance < 1:\n self.success = True\n reward = 100\n #possible reward for hover: np.exp(-np.square(linear_forces[2]))\n return reward", "def intersectsRay(self, ray):\n pass", "def move(self, direction):\n head = self.snake[0]\n delta = self.dirs[direction]\n nextMove = [head[0] + delta[0], head[1] + delta[1]]\n if not self.isValidMove(nextMove):\n return -1\n\n if self.food and nextMove == self.food[0]:\n self.food.popleft()\n else:\n self.snake.pop()\n\n self.snake.appendleft(nextMove)\n\n return len(self.snake) - 1", "def move_dart(self):\n global level\n if level == 0:\n self.rect.centerx+=self.delta\n if self.rect.centerx >= 1000: \n self.delta = -1\n elif self.rect.centerx < 500:\n self.delta = 1\n elif level == 1:\n self.rect.centery+=self.delta\n if self.rect.centery <= 150: \n self.delta = 2\n elif self.rect.centery > 650:\n self.delta = -2\n elif level == 2:\n self.rect.centerx+=self.delta #To make changes in both x and y direction\n self.rect.centery+=self.delta\n if self.rect.centerx < 100 or self.rect.centery <= 100: \n self.delta = random.randint(1,10) #adds random speeds to the motion\n elif self.rect.centerx >= 900 or self.rect.centery > 700:\n self.delta = -random.randint(1,10)", "def loseMark(state, rayPressing, distDemar, angleInter):\n opp = state.nearest_opponent(rayPressing)\n if opp is None:\n return shiftAside(state, distDemar, angleInter)\n return shiftAsideMark(state, opp, distDemar)", "def createRay(scorefn, resolution, opponent, ball, angle, maxBounces):\n scorefn = targetGoal['score']\n createRay(scorefn, pos, pos, angle, 3)", "def rollout(agent, env):\n # run until episode ends\n episode_reward = 0\n done = False\n obs = 
env.reset()\n while not done:\n action = agent.compute_action(obs)\n obs, reward, done, info = env.step(action)\n episode_reward += reward\n \n return episode_reward", "def move(s, a, beta):\n # update velocity with probability 1-beta\n global V\n if np.random.choice(2, p=[beta, 1-beta]) == 1:\n if a in [0, 3, 6] and V > 0: V -= 1\n elif a in [2, 5, 8] and V < 3: V += 1\n # else:\n # print \"velocity not updated!\"\n\n r_border = range(6, 49, 7) # states on the right border\n l_border = range(0, 49, 7) # states on the left border\n t_border = range(7) # states on the top border\n\n units = range(V)\n # move RIGHT of V units:\n if a < len(ACTIONS) / 3:\n for i in units:\n WORLD[STATE2WORLD[s+i]] = '~' # draw my path gradualy in the world\n # crash: reset world and velocities, return to start state\n if s+i in r_border or s+i+1 in WALLS:\n reset()\n return START, CRASH\n # nothing special: draw where I end up & return\n WORLD[STATE2WORLD[s+V]] = 'O'\n return s+V, STEP\n\n # move UP of V units:\n elif a < 2*len(ACTIONS) / 3:\n for i in units:\n WORLD[STATE2WORLD[s-i*7]] = '|' # draw my path gradualy in the world\n # crash: reset world and velocities, return to start state\n if s-i*7 in t_border or s-(i+1)*7 in WALLS:\n reset()\n return START, CRASH\n # nothing special: draw where I end up & return\n WORLD[STATE2WORLD[s-V*7]] = 'O'\n return s-V*7, STEP\n\n # move LEFT of V units:\n elif a < len(ACTIONS):\n for i in units:\n WORLD[STATE2WORLD[s-i]] = '~' # draw my path gradualy in the world\n # goal: draw where I end up & return\n if s-i-1 in GOALS:\n WORLD[STATE2WORLD[s-i-1]] = 'O'\n return s-i-1, WIN\n # crash: reset world and velocities, return to start state\n elif s-i in l_border or s-i-1 in WALLS:\n reset()\n return START, CRASH\n # nothing special: draw where I end up & return\n WORLD[STATE2WORLD[s-V]] = 'O'\n return s-V, STEP\n\n return s, STEP # should never happen", "def leave_wall(self, x, y, theta):\n #print \"leaving wall\"\n #self.current_x = x\n #self.current_y = y\n #self.wp_current_leave = Point(self.current_x,self.current_y)\n #BugBrain.wp_leave_wall_array.append(self.wp_current_leave)\n #print len(BugBrain.wp_leave_wall_array)\t\n # compute and store necessary variables\n #pass", "def _walk(self):\n new_pos = self.rect.move((self.move, 0)) # move 9 pixel to the right per frame\n if self.rect.left < self.area.left or self.rect.right > self.area.right:\n self.move = -self.move # move to the opposite direction when the chimp position exceeds the screen\n new_pos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(\n self.image, 1, 0\n ) # mirror the chimp to make it looks like turning around\n self.rect = new_pos", "def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n current_move = False\n current_depth = self.currentState.depth + 1\n list_movables = self.gm.getMovables()\n\n while not current_move:\n count = self.currentState.nextChildToVisit\n if len(list_movables) <= count:\n if not self.currentState.parent:\n return False\n else:\n self.gm.reverseMove(self.currentState.requiredMovable)\n list_movables = self.gm.getMovables()\n self.currentState = self.currentState.parent\n current_depth = self.currentState.depth + 1\n continue\n\n next_move = list_movables[count]\n self.gm.makeMove(next_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, next_move)\n if new_game_state in self.visited:\n self.currentState.nextChildToVisit += 1\n self.gm.reverseMove(next_move)\n 
else:\n self.currentState.nextChildToVisit += 1\n new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.currentState = new_game_state\n current_move = next_move\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n return False\n else:\n return True", "def hit_wall(self):\n if self.ball.x <= 0 or self.ball.x + self.ball.width > self.window.width:\n self.__dx = -self.__dx\n if self.ball.y <= 0:\n self.__dy = -self.__dy", "def solveOneStep(self):\n ### Student code goes here\n state = self.currentState\n #print (type(state))\n self.visited[state] = True\n #print (type(self.gm.getGameState()))\n moves = self.gm.getMovables()\n print (\"CURRENTSTATE\" + str(self.currentState.state))\n print (\"MOVABLES:\")\n if moves:\n for m in moves:\n print (str(m))\n print (\"CHILDINDEX:\")\n print (state.nextChildToVisit)\n print (\"*********\")\n if state.state == self.victoryCondition:\n return True\n #if no child to expand then go back\n if not moves or state.nextChildToVisit >= len(moves):\n self.currentState = state.parent\n if state.requiredMovable is not None:\n self.gm.reverseMove(state.requiredMovable)\n # expand\n else:\n\n next_move = moves[state.nextChildToVisit]\n self.gm.makeMove(next_move)\n state.nextChildToVisit += 1\n\n #if to parent or if visited then skip\n while (((state.parent is not None) and (self.gm.getGameState() == state.parent.state))) or GameState(self.gm.getGameState(), 0, None) in self.visited:\n print (\"PARENT FOUND!\")\n self.gm.reverseMove(next_move)\n if state.nextChildToVisit >= len(moves):\n self.currentState = state.parent\n return False\n else:\n next_move = moves[state.nextChildToVisit]\n self.gm.makeMove(next_move)\n state.nextChildToVisit += 1\n\n next_state = GameState(self.gm.getGameState(), state.depth + 1, next_move)\n next_state.parent = state\n #next_state.requiredMovable = next_move\n state.children.append(next_state)\n self.currentState = next_state\n print (state.nextChildToVisit)\n return False", "def intersectRay(self, ray):\n # Ray Tracing from the Ground Up, pg. 
367\n a, b, c, d = self.a[0] - self.b[0], self.a[0] - self.c[0], ray.d[0], self.a[0] - ray.o[0]\n e, f, g, h = self.a[1] - self.b[1], self.a[1] - self.c[1], ray.d[1], self.a[1] - ray.o[1]\n i, j, k, L = self.a[2] - self.b[2], self.a[2] - self.c[2], ray.d[2], self.a[2] - ray.o[2]\n\n m, n, p = f * k - g * j, h * k - g * L, f * L - h * j\n q, s = g * i - e * k, e * j - f * i\n\n denom = a * m + b * q + c * s\n if denom < self.kEpsilon:\n return None\n\n inv_denom = 1.0 / denom\n\n e1 = d * m - b * n - c * p\n beta = e1 * inv_denom\n\n if 1.0 < beta or beta < 0.0:\n return None\n\n r = e * L - h * i\n e2 = a * n + d * q + c * r\n gamma = e2 * inv_denom\n\n if 1.0 < gamma or gamma < 0.0:\n return None\n\n e3 = a * p - b * r + d * s\n t = e3 * inv_denom\n\n if t < self.kEpsilon:\n return None\n\n return t", "def shift_board(self, dx, dy):\n self.board = np.roll(self.board, dy, axis=0)\n self.board = np.roll(self.board, dx, axis=1)\n self.agent_locs += [dy, dx]\n self.agent_locs %= self.board.shape\n self.update_exit_locs()", "def iterate_ray(opt_model, ifcx, xy_target, fld, wvl, **kwargs):\n def y_stop_coordinate(y1, *args):\n seq_model, ifcx, pt0, dist, wvl, y_target = args\n pt1 = np.array([0., y1, dist])\n dir0 = pt1 - pt0\n length = norm(dir0)\n dir0 = dir0/length\n if dir0[2] * seq_model.z_dir[0] < 0:\n dir0 = -dir0\n\n try:\n ray, _, _ = rt.trace(seq_model, pt0, dir0, wvl)\n except TraceMissedSurfaceError as ray_miss:\n ray = ray_miss.ray_pkg\n if ray_miss.surf <= ifcx:\n raise ray_miss\n except TraceTIRError as ray_tir:\n ray = ray_tir.ray_pkg\n if ray_tir.surf < ifcx:\n raise ray_tir\n y_ray = ray[ifcx][mc.p][1]\n# print(y1, y_ray)\n return y_ray - y_target\n\n def surface_coordinate(coord, *args):\n seq_model, ifcx, pt0, dist, wvl, target = args\n pt1 = np.array([coord[0], coord[1], dist])\n dir0 = pt1 - pt0\n length = norm(dir0)\n dir0 = dir0/length\n if dir0[2] * seq_model.z_dir[0] < 0:\n dir0 = -dir0\n ray, _, _ = rt.trace(seq_model, pt0, dir0, wvl)\n xy_ray = np.array([ray[ifcx][mc.p][0], ray[ifcx][mc.p][1]])\n# print(coord[0], coord[1], xy_ray[0], xy_ray[1])\n return xy_ray - target\n\n seq_model = opt_model.seq_model\n osp = opt_model.optical_spec\n\n fod = opt_model['analysis_results']['parax_data'].fod\n dist = fod.obj_dist + fod.enp_dist\n\n pt0 = osp.obj_coords(fld)\n if ifcx is not None:\n if pt0[0] == 0.0 and xy_target[0] == 0.0:\n # do 1D iteration if field and target points are zero in x\n y_target = xy_target[1]\n logging.captureWarnings(True)\n try:\n start_y, results = newton(y_stop_coordinate, 0.,\n args=(seq_model, ifcx, pt0,\n dist, wvl, y_target),\n disp=False, full_output=True)\n except RuntimeError as rte:\n # if we come here, start_y is a RuntimeResults object\n # print(rte)\n start_y = results.root\n except TraceError:\n start_y = 0.0\n start_coords = np.array([0., start_y])\n else:\n # do 2D iteration. 
epsfcn is a parameter increment,\n # make proportional to pupil radius\n try:\n start_coords = fsolve(surface_coordinate, np.array([0., 0.]),\n epsfcn=0.0001*fod.enp_radius,\n args=(seq_model, ifcx, pt0, dist,\n wvl, xy_target))\n except TraceError:\n start_coords = np.array([0., 0.])\n else: # floating stop surface - use entrance pupil for aiming\n start_coords = np.array([0., 0.]) + xy_target\n\n return start_coords", "def cutDownAngle_def(state, raySortie, rayInter):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = max(min(raySortie, diff.norm - rayInter), 20.)\n position += diff\n return goTo(state,position)", "def movement(self, walls: list):\r\n keys = pygame.key.get_pressed()\r\n\r\n if keys[pygame.K_LEFT]: # Angle of vue\r\n self.angle -= self.turn_speed\r\n if keys[pygame.K_RIGHT]:\r\n self.angle += self.turn_speed\r\n\r\n if keys[pygame.K_w]:\r\n next_x = self.x + round(self.speed * math.cos(self.angle)) # same formula then in raycast.ray_casting\r\n next_y = self.y + round(self.speed * math.sin(self.angle))\r\n if walls[int(next_y / game_settings.TILE)][int(next_x / game_settings.TILE)]:\r\n # if we're going into a wall,\r\n # we dont go into the wall.\r\n return\r\n\r\n self.x = next_x\r\n self.y = next_y\r\n\r\n if keys[pygame.K_s]:\r\n next_x = self.x - round(self.speed * math.cos(self.angle))\r\n next_y = self.y - round(self.speed * math.sin(self.angle))\r\n if walls[int(next_y / game_settings.TILE)][int(next_x / game_settings.TILE)]:\r\n return\r\n\r\n self.x = next_x\r\n self.y = next_y\r\n\r\n if keys[pygame.K_a]:\r\n next_x = self.x + round(self.speed * math.sin(self.angle))\r\n next_y = self.y - round(self.speed * math.cos(self.angle))\r\n if walls[int(next_y / game_settings.TILE)][int(next_x / game_settings.TILE)]:\r\n return\r\n\r\n self.x = next_x\r\n self.y = next_y\r\n\r\n if keys[pygame.K_d]:\r\n next_x = self.x - round(self.speed * math.sin(self.angle))\r\n next_y = self.y + round(self.speed * math.cos(self.angle))\r\n if walls[int(next_y / game_settings.TILE)][int(next_x / game_settings.TILE)]:\r\n return\r\n\r\n self.x = next_x\r\n self.y = next_y", "def env_step(self, action):\n random_prob = np.random.uniform(0, 1)\n if random_prob <= self.stochasticity: # Ignore agent's action and move to one of the 8 neighbours\n # Determine how the agent moves (from -1 to 1 in each direction, but not both 0)\n random_nn = np.random.randint(0, len(self.nn))\n random_y = self.nn[random_nn, 0]\n random_x = self.nn[random_nn, 1]\n\n # Move to one of the nearest neighbours\n self.current_state[0] += random_y\n self.current_state[1] += random_x\n else: # Perform agent's action\n # Update current stated based on the action the agent took\n curr_x = self.current_state[1]\n self.current_state[0] += self.actions[action][0] + self.wind[curr_x]\n self.current_state[1] += self.actions[action][1]\n\n # Check if the agent fell out of the boundaries of the grid world\n y_coord = self.current_state[0]\n x_coord = self.current_state[1]\n\n if y_coord >= self.num_rows: # Agent went too far up\n self.current_state[0] = self.num_rows - 1\n elif y_coord < 0: # Agent went too far down\n self.current_state[0] = 0\n\n if x_coord >= self.num_cols: # Agent went too far right\n self.current_state[1] = self.num_cols - 1\n elif x_coord < 0: # Agent went too far left\n self.current_state[1] = 0\n\n is_terminal = False\n reward = -1.0\n\n # Check if the agent reached a terminal state\n if self.current_state == self.terminal_state:\n is_terminal = True\n reward = 0.0\n\n return 
reward, self.current_state, is_terminal", "def movee(self):\n\n #return the initial state if he cant move and he's in the initial state\n if not self.move and self.index == 0:\n return self.path[self.index]\n\n #return the goal state if he's at the goal state\n if self.index == len(self.path):\n return self.path[-1]\n\n #return the next move and increments the index attribute\n nextMove = self.path[self.index]\n self.index += 1\n\n return nextMove", "def getMove(self, grid):\n\t\tmove = self.performIterativeDepthSearch(grid)\n\t\tendtime = time.clock()\n\t\t#print (endtime - starttime)\n\t\treturn move", "def rayIntersection(self, ray):\n #t = \"what we are trying to find\"\n l = -ray.mDirection\n l0 = ray.mOrigin\n n = self.mNormal\n p0 = self.mDistance * n\n #p = l0 + l * t\n\n if l.dot(n) > 0:\n v = p0 - l0\n t = -(v.dot(n) / l.dot(n))\n return t\n\n else:\n return None", "def _move(self, dx, dy):\n pass # must override in subclass", "def pre_or_post_turn(self, game_field, all_ghost_out:bool):\r\n\r\n reference_pos = self.pos[0] + self.grid_size // 2, self.pos[1] + self.grid_size // 2 #< Positon is set to center of Pac-Man so there is no difference in which direction he moves\r\n field = game_field.possible_way(reference_pos, self.last_dir)\r\n self.cnt_points(field, all_ghost_out)\r\n self.dist = reference_pos[0] % self.grid_size, reference_pos[1] % self.grid_size\r\n\r\n # Check if Pac-Man is moving to the right \r\n if self.direction == 'r':\r\n\r\n # dist to the center of the crossing less then grid_size//2 -> it's a preturn\r\n if self.dist[0] < self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # dist to the center of the crossing greater then grid_size//2 -> it's a postturn\r\n elif self.dist[0] > self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n # The rest of the function does the same as above, just for the other three directions \r\n\r\n elif self.direction == 'l':\r\n #Preturn left\r\n if self.dist[0] > self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 
'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n #Postturn left\r\n elif self.dist[0] < self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'u':\r\n #Preturn up\r\n if self.dist[1] > self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n #Postturn up\r\n elif self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += self.grid_size - (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'd':\r\n #Preturn down\r\n if self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n #Postturn down\r\n elif self.dist[1] > self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n pass", "def move(self, direction: str) -> int:\n x, y = self.snake[-1][0] + self.dirs[direction][0], self.snake[-1][1] + self.dirs[direction][1]\n # print(x,y)\n # went out bound\n if x >= self.n or y >= self.m or x < 0 or y < 0:\n return -1\n tail = self.snake.popleft()\n self.snake_set.discard(tail)\n if (x, y) in self.snake_set:\n return -1\n\n if len(self.food) > 0 and (x, y) == (self.food[0][0], self.food[0][1]):\n self.food.popleft()\n self.snake.appendleft(tail)\n self.snake_set.add(tail)\n self.snake.append((x, y))\n self.snake_set.add((x, y))\n\n return len(self.snake) - 1", "def _animate(self):\n steps = (1, 7, 14)\n if self.rect.x < self.start_x - 100:\n self.change_dir = False\n elif self.rect.x > self.start_x + 100:\n self.change_dir = True\n self.direction = -1 if 
self.change_dir else 1\n self.rect.x += self.direction * choice(steps)", "def agent(game_board, max_depth=2):\n move_num = expecti_minimax(game_board, True, max_depth)[1]\n move = ['L', 'R', 'U', 'D'][move_num]\n return move", "def movement(scale, direction):\n try:\n if direction == left:\n args[0].umvr(-scale, log=False, newline=False)\n elif direction == right:\n args[0].umvr(scale, log=False, newline=False)\n elif direction == up:\n args[1].umvr(scale, log=False, newline=False)\n elif direction == down:\n args[1].umvr(-scale, log=False, newline=False)\n except Exception as exc:\n logger.error('Error in tweak move: %s', exc)\n logger.debug('', exc_info=True)", "def comp_turn():\n global red_turn,board_array,die_1_num,die_2_num\n roll()\n red_turn = False\n value,move = backgammon_AI.choose_move(board_array,die_1_num,die_2_num,doubles)\n print value,move\n if(value != -1000):\n for sub_move in move:\n board_array[sub_move[0]][1] -= 1\n board_array[sub_move[1]][1] += 1\n if(board_array[sub_move[1]][0] == 1): #Handle hits\n board_array[sub_move[1]][0] -= 1\n board_array[0][0] += 1\n die_1_num = 0\n die_2_num = 0\n update_dice()\n draw_draughts()\n red_turn = True", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def rayleigh(th,r,wl,a,n1,n2):\n c = np.cos(th)\n c2,s2 = c**2, np.sin(th)**2\n k = 2*np.pi/wl\n n_2 = n2**2/n1**2\n m = (k**4)*(a**6)*(abs(n_2-1)**2) / ((abs(n_2+2)**2) * 2 * (r**2))\n return m*np.array([[1+c2 , -s2 , 0 , 0],\n [-s2 , 1+c2 , 0 , 0],\n [0 , 0 , 2*c , 0],\n [0 , 0 , 0 , 2*c]])", "def move_objects(time_elapsed):\n move_object(ball, time_elapsed)\n\n # Bounce the ball off the sides, if necessary.\n bounce_off_walls(ball, bounds_left, bounds_right,\n bounds_top, bounds_bottom)", "def virtual_move_to(self,path):\n #print path\n temp_state=self.state\n temp_data=self.data\n temp_current_node=self.current_node\n temp_node_names=self.node_names\n for index,edge in enumerate(path):\n #print edge\n edge_pattern='edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)'\n match=re.match(edge_pattern,edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print(\"moving {0} -> {1}\".format(begin_node,end_node))\n #print self.data\n temp_data=self.__dict__[edge](temp_data)\n #print self.data\n temp_current_node=match.groupdict()['end_node']\n temp_state=[0 for i in range(len(temp_node_names))]\n position=temp_node_names.index(temp_current_node)\n temp_state[position]=1\n #print temp_state\n #print self.state\n #print self.current_node", "def trace(self):\n\n \n assert self.scene != None, \"The photon's scene variable is not set.\"\n \n 
intersection_points, intersection_objects = self.scene.intersection(self.ray)\n\n \"\"\"\n #DIAGNOSTICS\n print \"\\nnew\\n\"\n print self.position, self.direction, \"\\n\"\n print intersection_points, \"\\n\"\n for i in range(0, len(intersection_objects)):\n print \"Object: \", intersection_objects[i].name, \" - Intersection: \", intersection_points[i]\n \"\"\"\n \n assert intersection_points != None, \"The ray must intersect with something in the scene to be traced.\"\n \n if self.container is None:\n self.container = self.scene.container(self)\n assert self.container != None, \"Container of ray cannot be found.\"\n \n #import pdb; pdb.set_trace()\n #import pudb; pudb.set_trace()\n intersection_points, intersection_objects = Scene.sort(intersection_points, intersection_objects, self, container=self.container, show_log=self.show_log)\n \n # find current intersection point and object -- should be zero if the list is sorted!\n intersection = closest_point(self.position, intersection_points)\n for i in range(0,len(intersection_points)):\n if list(intersection_points[i]) == list(intersection):\n index = i\n break\n \n #import pdb; pdb.set_trace()\n intersection_object = intersection_objects[index]\n assert intersection_object != None, \"No intersection points can be found with the scene.\"\n \n \n \"\"\"\n #DIAGNOSTICS\n print \"\\n\", intersection, \"\\n\"\n print intersection_object.name \n \"\"\" \n \n \n # Reached scene boundaries?\n if intersection_object is self.scene.bounds:\n self.active = False\n self.previous_container = self.container\n self.container = self.scene.bounds\n return self\n\n\n # Reached a RayBin (kind of perfect absorber)?\n if isinstance(intersection_object, RayBin):\n self.active = False\n self.previous_container = self.container\n self.container = self.scene.bounds\n return self\n \n \n # Here we trace the ray through a Coating\n if isinstance(self.container, Coating):\n normal = intersection_object.shape.surface_normal(self.ray)\n self = self.container.material.trace(self, normal, separation(self.position, intersection))\n self.exit_device = self.container\n self.previous_container = self.container\n self.container = self.scene.container(self)\n return self\n \n \n # Here we determine if the Coating has been hit\n if isinstance(intersection_object, Coating) and intersection_object.shape.on_surface(self.position):\n self.previous_container = self.container\n self.container = intersection_object\n self.exit_device = intersection_object\n assert self.exit_device != self.scene.bounds, \"The object the ray hit before hitting the bounds is the bounds, this can't be right.\"\n return self\n \n \n # Here we trace the ray through a Material\n self = self.container.material.trace(self, separation(self.position, intersection))\n \n \n # Lost in material?\n # Photon has been re-absorbed but NOT re-emitted, i.e. is inactive\n if not self.active:\n #01/04/10: Unification --> Next two lines came from older Trace version\n self.exit_device = self.container\n self.exit_material = self.container.material\n return self \n \n # Reaches interface\n # Photon has been re-absorbed AND re-emitted, i.e. 
is still active\n ray_on_surface = intersection_object.shape.on_surface(self.position)\n if not ray_on_surface: \n self.exit_device = self.container\n return self\n \n # Ray has reached a surface of some description, increment the intersection counter\n self.intersection_counter += 1\n \n # If we reach an reflective material then we don't need to follow \n # this logic we can just return\n if ray_on_surface and isinstance(intersection_object, Coating):\n self.previous_container = self.container\n self.container = intersection_object\n self.exit_device = intersection_object\n return self\n \n # KARLG NEW CODE HERE\n #import pudb; pudb.set_trace()\n if isinstance(intersection_object, Face):\n self.exit_device = intersection_object\n \n # Now change the properties of the photon accoring to what your surface does\n random_number = np.random.random_sample()\n if random_number < intersection_object.reflectivity:\n # Reflected\n self.direction = reflect_vector(intersection_object.shape.surface_normal(self.ray), self.direction)\n elif random_number < intersection_object.reflectivity + intersection_object.transmittance:\n # Transmitted\n pass\n else:\n # Loss\n self.active = False\n return self\n \n # Fresnel details\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n \n # material-air or material-material interface\n # Are there duplicates intersection_points that are equal to the ray position?\n same_pt_indices = []\n for i in range(0,len(intersection_points)):\n if cmp_points(self.position, intersection_points[i]):\n same_pt_indices.append(i)\n assert len(same_pt_indices) < 3, \"An interface can only have 2 or 0 common intersection points.\"\n \n initialised_internally = None\n \n if len(same_pt_indices) == 2:\n intersection_object = self.container\n \n if self.container == intersection_object:\n \n # hitting internal interface -- for the case we are at an material-material interface (i.e. 
not travelling through air)\n initialised_internally = True\n \n if len(same_pt_indices) == 2:\n \n for obj in intersection_objects:\n if obj.shape.on_surface(intersection) and obj != self.container:\n #if obj != self.container:\n next_containing_object = obj\n \n \n else:\n # hitting internal interface -- for the case we are not at an interface\n next_containing_object = self.scene.container(self)\n \n assert self.container != next_containing_object, \"The current container cannot also be the next containing object after the ray is propagated.\"\n \n # Fresnel details\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n if self.polarisation == None:\n reflection = fresnel_reflection(rads, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n else:\n reflection = fresnel_reflection_with_polarisation(normal, self.direction, self.polarisation, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n \n else:\n # hitting external interface\n initialised_internally = False \n \n \n if len(same_pt_indices) == 2:\n for obj in intersection_objects:\n if obj != self.container:\n intersection_object = obj\n next_containing_object = obj\n else:\n next_containing_object = intersection_object\n \n #import pdb; pdb.set_trace()\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n if self.polarisation == None:\n reflection = fresnel_reflection(rads, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n else:\n reflection = fresnel_reflection_with_polarisation(normal, self.direction, self.polarisation, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n \n if isinstance(next_containing_object, Collector):\n # If the photon hits an interface with e.g. a cell index-matched to it, then no reflection is to occur at this interface.\n reflection = 0.\n \n if np.random.random_sample() < reflection:\n # photon is reflected\n before = copy(self.direction)\n self.direction = reflect_vector(normal, self.direction)\n ang = angle(before, self.direction)\n \n if self.polarisation != None:\n \n #import pdb; pdb.set_trace()\n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n \n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. #1: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(np.degrees(angle(self.direction, self.polarisation)))\n \n self.propagate = False\n self.exit_device = self.container\n \n # invert polaristaion if n1 < n2\n if self.container.material.refractive_index < next_containing_object.material.refractive_index:\n \n if self.polarisation != None:\n \n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation * -1.\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n \n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. 
#2: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n if self.exit_device == self.scene.bounds or self.exit_device == None:\n self.exit_device = intersection_object\n assert self.exit_device != self.scene.bounds, \"The object the ray hit before hitting the bounds is the bounds, this can't be right\"\n return self\n else:\n # photon is refracted through interface\n self.propagate = True\n before = copy(self.direction)\n ang = angle(before, self.direction)\n if initialised_internally:\n if not isinstance(next_containing_object, Collector):\n self.direction = fresnel_refraction(normal, self.direction, self.container.material.refractive_index, next_containing_object.material.refractive_index )\n \n if self.polarisation != None:\n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. #3: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n self.exit_device = self.container #LSC is the exit_device\n self.previous_container = self.container\n self.container = next_containing_object #Bounds is the container\n return self\n else:\n if not isinstance(next_containing_object, Collector):\n self.direction = fresnel_refraction(normal, self.direction, self.container.material.refractive_index, intersection_object.material.refractive_index )\n \n if self.polarisation != None:\n \n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n\n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. 
#4: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n # DJF 13.5.2010: This was crashing the statisical collection because it meant that an incident ray, hitting and transmitted, then lost would have bounds as the exit_device.\n #self.exit_device = self.container\n self.exit_device = intersection_object\n self.previous_container = self.container\n self.container = intersection_object\n return self", "def move(self, state):\n \n self.depth_limit=1\n self.best_utility=-2\n action=None\n while not self.is_time_up():\n self.terminal=True\n self.cache={}\n action=self.alpha_beta_search(state,0)\n if self.terminal==True:\n break\n self.depth_limit=self.depth_limit+1\n \n return action", "def step(self):\n\n self.ball_x = self.ball_x + self.vel_x\n self.ball_y = self.ball_y + self.vel_y\n if self.ball_y >= 480:\n self.vel_y *= -1\n elif self.ball_y <= 0:\n self.vel_y *= -1\n if self.ball_x >= 640:\n self.vel_x *= -1\n elif self.ball_x <= 0:\n self.vel_x *= -1", "def refract(self, ray, rho):\n normal = self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n incidence = dot(-ray.direction, normal)\n complement = sqrt(1.0 - (1.0 - incidence**2) / rho**2)\n return Ray((ray.direction / rho +\n (incidence / rho - complement) * normal), ray.position)", "def move_agent(self, agent):\n id_ = agent.id_\n p = agent.mobility.current\n x, y = to_geometry(p[0]), to_geometry(p[1])\n print('move agent{} {} {}'.format(id_, x, y))\n print('move agentr{} {} {}'.format(id_, x, y))", "def _move(self, direction, difference):\n future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)", "def move(self, hex):\n # If current has nest, set nest location to unoccupied\n if self.hex is not None:\n self.hex.unoccupied()\n # Set nest site to new hexagon\n self.hex = hex\n # Update occupancy of new hexagon\n self.hex.occupied()\n self.history = []", "def rollout(self, current_state):\n while not self.state_manager.game_over():\n performed_action = self.make_move(current_state, is_rollout=True)\n current_state, _ = self.get_child(current_state, performed_action)\n reward = self.get_reward()\n return reward", "def move_draught_begin(event):\n global red_draughts, white_draughts\n global board_array\n global click_offset\n global old_point\n draught = board.find_withtag(CURRENT)[0]\n click_offset = [event.x-board.coords(draught)[0],event.y-board.coords(draught)[1]] #How far off the click is from the coordinates of the draught it's moving\n bottom = (event.y-click_offset[1] >= board_height//2)\n point_left_edges = [board_divisions*i for i in xrange(0,15) if i != 7]\n if bottom == False:\n if(event.x-click_offset[0] == 7*board_divisions): #If on the white bar\n old_point = 25\n else:\n old_point = 12+point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))\n else:\n if(event.x-click_offset[0] == 7*board_divisions): #If on the red bar\n old_point = 0\n else:\n old_point = 13-point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))", "def move(self, direction):\n \n \"\"\" \n Moving snake is:\n - count new position of head depending on direction\n - if we out of boundaries => return -1\n - if snake' body already have this coordinate => return -1 (collide)\n - insert new head to 
self.snake beggining\n - cut tail unless our new head met food\n \"\"\"\n new_head = SnakeGame.computeCoord(self.snake[0], direction)\n if not (0 <= new_head[0] < self.height and 0 <= new_head[1] < self.width):\n return -1 # out of boundaried\n \n if self.food_stk and new_head == self.food_stk[-1]:\n self.score += 1\n self.food_stk.pop()\n else:\n self.__RemoveTail()\n \n if not self.__AddHead(new_head):\n return -1 # collision\n \n return self.score" ]
[ "0.6431586", "0.6042099", "0.5938072", "0.5936508", "0.592946", "0.58522797", "0.56683385", "0.5636461", "0.5598364", "0.55645293", "0.55484056", "0.5496541", "0.54507935", "0.543731", "0.5407489", "0.5379115", "0.53595096", "0.53525144", "0.534523", "0.5339988", "0.5306413", "0.5302718", "0.5302426", "0.53018546", "0.52990806", "0.5298072", "0.5278345", "0.52619886", "0.5254762", "0.5253217", "0.5234391", "0.52241033", "0.5210484", "0.5189818", "0.51767856", "0.51716876", "0.5171576", "0.5166489", "0.5146716", "0.5130627", "0.51211995", "0.51179224", "0.5111727", "0.5094128", "0.5082184", "0.50446886", "0.50295186", "0.5028519", "0.5025639", "0.5018301", "0.50121", "0.5005376", "0.4985789", "0.49713507", "0.49680024", "0.49675325", "0.4963293", "0.4958454", "0.49424535", "0.49323297", "0.49273267", "0.4924576", "0.4921287", "0.49158788", "0.49139613", "0.49123207", "0.49107647", "0.49060243", "0.49047258", "0.4903328", "0.4902983", "0.49028063", "0.49027187", "0.4897198", "0.4893521", "0.48865354", "0.48838073", "0.48763213", "0.4875693", "0.48728514", "0.48724404", "0.48622945", "0.4853213", "0.4852354", "0.4851767", "0.48417214", "0.48417214", "0.48411754", "0.48396784", "0.48359242", "0.4833272", "0.48312843", "0.4829923", "0.48146552", "0.480774", "0.4801283", "0.4800917", "0.4798301", "0.47973403", "0.4793033" ]
0.71545416
0
shoot_ray shoots a ray from a given row and column if possible
def shoot_ray(self, origin_row, origin_column):
    # get the the square object at row x column
    origin = self._board.get_board_square((origin_row, origin_column))
    # check that it is a valid "edge" to send a ray from
    origin_check = origin.is_edge()
    # if it's not then return false
    if origin_check == False:
        return False
    # if we pass the origin check create shoot a new Ray.Ray object from row x column
    new_ray = Ray.Ray(origin_row, origin_column)
    # let the square we shot from know its an orign square
    origin.set_originating_ray(new_ray)
    # Deduct 1 from the score since we now have on exit point
    self.set_score(-1)
    # while the ray object has a direction (will be set to none when it reaches an endpoint)
    # send it to the helper function that will move it
    while new_ray.get_direction() != None:
        self.move_ray(new_ray)
    # if we hit an exit point (other than through reflection) deduct the point for that
    terminus = new_ray.get_terminal_location()
    # check the the terminal point is an edge (hitting an atom returns none as terminus)
    if terminus != None:
        # check that the terminus is not a reflection, which shouldn't be counted twice
        terminal_square = self._board.get_board_square(terminus)
        terminal_square.set_terminating_ray(new_ray)
        if terminus != (origin_row, origin_column):
            self.set_score(-1)
    return terminus
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shoot_ray(self, row, col):\n # Uses validate method to check if row,col are legal for ray entrance location\n if not self.valid_ray(row, col):\n return False\n # creates ray object from row, col integers\n ray = Ray(row, col)\n # checks if atom is in front of entrance position\n if not ray.can_continue(self.get_a_locations()):\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None\n # while there is no atom in front of ray and ray will not exit board --\n while ray.can_continue(self.get_a_locations()):\n ray.check_diags(self.get_a_locations())\n # moves ray forward one space\n ray.advance()\n # if ray will exit board by advancing --\n if not ray.on_board():\n # adjusts score if entrance/exit do not match prior entrances/exits\n self.mark_portal(ray.get_start(), ray.get_pos())\n # changes state to lose if score is now <= 0\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n # returns tuple of exit location\n return tuple(ray.get_pos())\n # if ray is blocked by atom --\n if not ray.no_atom(self.get_a_locations()):\n # changes state to lost if score is now <= 0\n self.mark_portal(ray.get_start())\n if self.get_score() <= 0:\n self.change_state(\"LOST\")\n return None", "def shoot_ray(self, entry_x, entry_y):\r\n\r\n # check to make sure entry_x and entry_y are valid\r\n if (entry_x in [0, 9] or entry_y in [0, 9]) and \\\r\n self._board.get_board_item(entry_x, entry_y) != \"o\":\r\n\r\n exit_tup = self._board.find_exit(entry_x, entry_y)\r\n # returned 0 if hit\r\n if exit_tup == 0:\r\n # decrement entry only if not visited\r\n marker = self.get_hit_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n self._stats.dec_player_score(points)\r\n return \"Hit\"\r\n elif exit_tup == 1:\r\n # decrement entry only if not visited\r\n marker = self.get_reflect_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n\r\n self._stats.dec_player_score(points)\r\n\r\n return \"reflect\"\r\n else:\r\n # decrement both entry and exit if not already visited\r\n marker = self.get_color_marker()\r\n exit_x, exit_y = exit_tup\r\n circle_entry = self.calculate_entry_exit(entry_y, entry_x)\r\n circle_exit = self.calculate_entry_exit(exit_y, exit_x)\r\n marker.update_center(circle_entry, circle_exit)\r\n points = self._player.add_entry_exit((entry_x, entry_y),\r\n marker, exit_tup)\r\n\r\n self._stats.dec_player_score(points)\r\n return exit_tup\r\n else:\r\n # returns false if the shoot_ray point is invalid\r\n return \"Bad shot\"", "def rayShooting():\r\n \r\n \r\n if nbRay==1:\r\n maxi=1\r\n mini=1\r\n peaceofAngle=angleMax\r\n #to trace one ray at angleMax\r\n else:\r\n maxi=(nbRay-1)/2\r\n mini=-maxi\r\n peaceofAngle=2*angleMax/(nbRay-1)\r\n #to trace rays at regular intervals between [-angleMax;angleMax] \r\n\r\n tot=0 #to count the number of peace of ray\r\n indice=0 #to browse raysIndex\r\n\r\n raysMatrix=np.empty(shape=(0,5),dtype=np.float64)#will contain all the rays in a row\r\n raysIndex=np.empty(shape=(nbRay,),dtype=np.int16)#indexation of the rays in raysMatrix\r\n \r\n for i in np.arange(mini,maxi+1,1):#put maxi+1 to include maxi in the loop\r\n \r\n rayon=Rayon(source.position,angleToVector(peaceofAngle*i))#rayon is\r\n #the ray we 
will trace\r\n ray,compt=traceRay(rayon)\r\n tot+=(compt+1)\r\n\r\n \r\n raysIndex[indice]=tot #the rays index contains the indice just above\r\n #of the end of the i th ray\r\n\r\n raysMatrix=np.vstack((raysMatrix,ray))\r\n #the form of the ray matrix is a stack of peace of rays describe by\r\n #a,b,c,x1,reflexion. the polynome of the peace of ray being ax^2+bx+c and the\r\n #abscisses of the limiting point being x1, reflexion indicating if a reflexion happened\r\n #when we meet a 5-uple with a coefficient b or c infinite it means\r\n #a new ray begin\r\n \r\n indice+=1\r\n print(\"ray at indice\",i,\"and at angle\",peaceofAngle*i/np.pi*180,'degree(s)')\r\n \r\n print(\"the total number of peaces of ray is :\", tot)\r\n\r\n return(raysMatrix,raysIndex)", "def check_click(self, mouse_x, mouse_y):\r\n # Change the x/y screen coordinates to grid coordinates\r\n column = mouse_x // 70\r\n row = mouse_y // 70\r\n\r\n if row in [0, 9] or column in [0, 9]:\r\n self.shoot_ray(row, column)\r\n elif 0 < row < 9 and 0 < column < 9:\r\n self.guess_atom(row, column)", "def ship_shoot(ship, x, y):\n click.echo('Ship %s fires to %s,%s' % (ship, x, y))", "def obj_ray_cast(obj, matrix):\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None", "def maybe_shoot(self):\n res = self.space.segment_query_first((self.tank.body.position[0] - \\\n 0.6 * math.sin(self.tank.body.angle), self.tank.body.position[1] +\\\n 0.6 * math.cos(self.tank.body.angle)), (self.tank.body.position[0] -\\\n 10*math.sin(self.tank.body.angle), self.tank.body.position[1] + \\\n 10*math.cos(self.tank.body.angle)), 0, pymunk.ShapeFilter())\n if res is not None:\n try:\n if hasattr(res, 'shape'):\n if isinstance(res.shape.parent, gameobjects.Tank):\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n elif isinstance(res.shape.parent, gameobjects.Box):\n if res.shape.parent.boxmodel.destructable is True:\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n except:\n pass", "def raytrace(pos1: tuple, pos2: tuple) -> list:\n x0, y0 = pos1\n x1, y1 = pos2\n tiles = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n x, y = x0, y0\n n = 1 + dx + dy\n x_inc = 1 if x1 > x0 else -1\n y_inc = 1 if y1 > y0 else -1\n error = dx - dy\n dx *= 2\n dy *= 2\n\n while n > 0:\n tiles.append((x, y))\n if error > 0:\n x += x_inc\n error -= dy\n else:\n y += y_inc\n error += dx\n n -= 1\n return tiles", "def draw_ray(env, ray, dist=0.03, linewidth=2, color=None):\n if dist < 0:\n newpos = ray.pos() + dist*ray.dir()\n newray = orpy.Ray(newpos, ray.dir())\n else:\n newray = ray\n iktype = orpy.IkParameterizationType.TranslationDirection5D\n ikparam = orpy.IkParameterization(ray, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, dist=dist, linewidth=linewidth,\n coloradd=color)\n return h", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n \n # cast the ray\n success, location, normal, 
face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index, ray_target\n else:\n return None, None, None, ray_target", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv @ ray_origin\n ray_target_obj = matrix_inv @ ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def test_compute_pixel_rays() -> None:\n u = 12\n v = 2\n img_w = 20\n img_h = 10\n fx = 10\n fy = 10\n\n ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n\n gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n assert np.allclose(gt_ray_dir, ray_dir)", "def shoot(self, pos_to_shoot):\n return [SHOOT, pos_to_shoot]", "def sling_action():\n global mouse_distance\n global rope_lenght\n global angle\n global x_mouse\n global y_mouse\n # Fixing bird to the sling rope\n v = vector((sling_x, sling_y), (x_mouse, y_mouse))\n uv = unit_vector(v)\n uv1 = uv[0]\n uv2 = uv[1]\n # mouse_distance = distance(sling_x, sling_y, x_mouse, y_mouse)\n sling = Vec2d(sling_x, sling_y)\n mouse = Vec2d(x_mouse, y_mouse)\n mouse_distance = (sling - mouse).length\n\n pu = (uv1*rope_lenght+sling_x, uv2*rope_lenght+sling_y)\n bigger_rope = 102\n x_redbird = x_mouse - 20\n y_redbird = y_mouse - 20\n if mouse_distance > rope_lenght:\n pux, puy = pu\n pux -= 20\n puy -= 20\n pul = pux, puy\n screen.blit(redbird, pul)\n pu2 = (uv1*bigger_rope+sling_x, uv2*bigger_rope+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu2, 5)\n screen.blit(redbird, pul)\n pygame.draw.line(screen, (0, 0, 0), (sling_x, sling_y), pu2, 5)\n else:\n mouse_distance += 10\n pu3 = (uv1*mouse_distance+sling_x, uv2*mouse_distance+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu3, 5)\n screen.blit(redbird, (x_redbird, y_redbird))\n pygame.draw.line(screen, (0, 0, 0), (sling_x, sling_y), pu3, 5)\n # Angle of impulse\n dy = y_mouse - sling_y\n dx = x_mouse - sling_x\n if dx == 0:\n dx = 0.00000000000001\n angle = math.atan((float(dy))/dx)", "def direct(sun_pos, grid):\n\n # for each pixel at top of grid pass sun rays in\n for i in xrange(grid.gr.shape[0]):\n \"\"\"\n Make an array starting at loc\n \"\"\"\n xpos = i * grid.xres\n ypos = grid.zres * grid.zsize\n pos = np.array(xpos, ypos)\n direction = pos - sun_pos / np.norm(pos - sun_pos) # this location minus \n r = ray(pos, direction)\n \"\"\"\n The ray now travels down through the canopy being\n altered by transmission and reflectance\n\n amount of scattering vs absorption is determined by leaf area density\n\n \"\"\"", "def moveFunction(target, rays):\r\n for ray in rays:\r\n ray.hitTarget(target)", "def check_pin_ball_hit(time_elapsed):\n\n pass", "def get_initial_rays_trig(bs,\n num_steps,\n fov,\n resolution,\n ray_start,\n ray_end,\n device, 
):\n\n W, H = resolution\n # Create full screen NDC (-1 to +1) coords [x, y, 0, 1].\n # Y is flipped to follow image memory layouts.\n x, y = torch.meshgrid(torch.linspace(-1, 1, W, device=device),\n torch.linspace(1, -1, H, device=device))\n x = x.T.flatten() # (HxW, ) [[-1, ..., 1], ...]\n y = y.T.flatten() # (HxW, ) [[1, ..., -1]^T, ...]\n z = -torch.ones_like(x, device=device) / np.tan((2 * math.pi * fov / 360) / 2) # (HxW, )\n\n rays_d_cam = normalize_vecs(torch.stack([x, y, z], -1)) # (HxW, 3)\n\n z_vals = torch.linspace(ray_start,\n ray_end,\n num_steps,\n device=device) \\\n .reshape(1, num_steps, 1) \\\n .repeat(W * H, 1, 1) # (HxW, n, 1)\n points = rays_d_cam.unsqueeze(1).repeat(1, num_steps, 1) * z_vals # (HxW, n_samples, 3)\n\n points = torch.stack(bs * [points]) # (b, HxW, n_samples, 3)\n z_vals = torch.stack(bs * [z_vals]) # (b, HxW, n_samples, 1)\n rays_d_cam = torch.stack(bs * [rays_d_cam]).to(device) # (b, HxW, 3)\n\n return points, z_vals, rays_d_cam", "def test_compute_pixel_ray_directions_vectorized() -> None:\n fx = 10\n fy = 10\n\n # dummy 2d coordinates in the image plane.\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n\n # principal point is at (10,5)\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n gt_ray_dir: NDArrayFloat = np.array([2, -3, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n for i in range(4):\n assert np.allclose(gt_ray_dir, ray_dirs[i])", "def point_on_ray(self, t=0.5):\n\n assert 0. <= t <=1., 't must be between 0 and 1'\n\n\n return self.detector_origin + (self._origin - self.detector_origin) * t", "def ai_shoot(self, gk, goal_x):\n\n angles = {\n 1: { # For team 1\n 'SHOOT_E': math.pi/4,\n 'SHOOT_D': 0,\n 'SHOOT_C': -math.pi/4,\n },\n 2: { # For team 2\n 'SHOOT_Q': math.pi*3/4,\n 'SHOOT_A': math.pi,\n 'SHOOT_Z': -math.pi*5/4,\n },\n }\n\n self_pos = P(self.pos.x, H-self.pos.y)\n gk_pos = P(gk.pos.x, H-gk.pos.y)\n\n possible_shots = []\n for k, v in angles[self.team_id].items():\n line = [ # Equation of line as A*x +B*y + C = 0\n math.sin(v), # x coeff\n -math.cos(v), # y coeff\n self_pos.y*math.cos(v) - self_pos.x*math.sin(v), # constant\n ]\n intersection_pt = -(line[2] + line[0]*goal_x)/line[1]\n if GOAL_POS[0]*H < intersection_pt < GOAL_POS[1]*H:\n possible_shots.append((-self.dist_to_line(line, gk_pos), k))\n\n if possible_shots:\n shot = sorted(possible_shots)[0][1]\n else:\n shot = 'NOTHING'\n\n return shot", "def shoot(self):\n pt = self.pt()\n assert pt >= 0\n m = self.mass()\n assert m >= 0\n sqrt_pt2_m2 = math.sqrt( pt**2 + m**2 )\n y = self.rap()\n e = sqrt_pt2_m2 * math.cosh(y)\n pz = sqrt_pt2_m2 * math.sinh(y)\n phi = self.phi()\n px = pt * math.cos(phi);\n py = pt * math.sin(phi);\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def hit(bx, by, r, px, py,h):\n if bx >= px:\n distance = bx - px\n else:\n distance = px - bx\n if py<=by and by<=py+h and distance <= r:\n return True\n else:\n return False", "def sling_action():\n global mouse_distance\n global rope_length\n global angle\n global mouse_x_pos\n global mouse_y_pos\n\n #add code inside sling function\n # Fixing bird to the sling rope\n vec = vector((initial_x_sling, initial_y_sling), (mouse_x_pos, mouse_y_pos))\n unit_vec = unit_vector(vec)\n uv_1 = unit_vec[0]\n uv_2 = unit_vec[1]\n mouse_distance = 
distance(initial_x_sling, initial_y_sling, mouse_x_pos, mouse_y_pos) #point at which currrent bird id\n fix_pos = (uv_1*rope_length+initial_x_sling, uv_2*rope_length+initial_y_sling)\n highest_length = 102 #when stretched\n\n #to make bird stay within rope\n x_redbird = mouse_x_pos - 20\n y_redbird = mouse_y_pos - 20\n if mouse_distance > rope_length:\n pux, puy = fix_pos\n pux -= 20\n puy -= 20\n first_pos = pux, puy\n screen.blit(redbird, first_pos)\n second_pos = (uv_1*highest_length+initial_x_sling, uv_2*highest_length+initial_y_sling) #current position\n pygame.draw.line(screen, (255, 0, 0), (next_x_sling, next_y_sling), second_pos, 5) #catapult rope\n screen.blit(redbird, first_pos)\n pygame.draw.line(screen, (255, 0, 0), (initial_x_sling, initial_y_sling), second_pos, 5) #ANOTHER SIDE of catapult\n else:\n #when not fully stretched\n mouse_distance += 10\n third_pos = (uv_1*mouse_distance+initial_x_sling, uv_2*mouse_distance+initial_y_sling)\n pygame.draw.line(screen, (0, 0, 0), (next_x_sling, next_y_sling), third_pos, 5)\n screen.blit(redbird, (x_redbird, y_redbird))\n pygame.draw.line(screen, (0, 0, 0), (initial_x_sling, initial_y_sling), third_pos, 5)\n # Angle of impulse\n\n change_in_y = mouse_y_pos - initial_y_sling\n change_in_x = mouse_x_pos - initial_x_sling\n if change_in_x == 0:\n dx = 0.00000000000001\n angle = math.atan((float(change_in_y))/change_in_x)", "def shoot_fire(self, camera):\n\n cursor_pos = pygame.mouse.get_pos()\n tempMouseRect = pygame.Rect(cursor_pos, (0, 0))\n tempMouseRect = camera.use_cam_rect(tempMouseRect)\n\n relPos = tempMouseRect.topleft\n\n self.intMousePos = relPos\n ang = self.get_shoot_angle(relPos)\n #ang = math.radians(170 - math.degrees(ang))\n ang = math.radians(( (math.degrees(ang)+ 180 )))\n #ang = int(ang)\n\n if self.canShoot and self.ammo: #and self.is_good_angle(ang):\n self.canShoot = False\n self.ammo -= 1\n self.timer_fire = time.time()\n\n # decide starting position of fireball\n\n xPos = self.rect.centerx\n\n fire = powersC.Fireball((xPos, self.rect.centery), ang, self.direction)\n self.powerGroup.add(fire)", "def createRay(scorefn, resolution, opponent, ball, angle, maxBounces):\n scorefn = targetGoal['score']\n createRay(scorefn, pos, pos, angle, 3)", "def start_shooting(agent):\n agent.step = Step.Shooting\n target = shooting_target(agent)\n speed = get_speed(agent, target)\n agent.drive.target = target\n agent.drive.speed = speed", "def shoot(self, direction):\n self.type = self.boss.get_bullet_type()\n if self.type == 'shotgun':\n try:\n dx = abs(Laser.List[-1].x - self.x)\n dy = abs(Laser.List[-1].y - self.y)\n if dx < 50 and dy < 50 and self.type == 'shotgun':\n return\n except Exception:\n pass\n\n if(self.type == 'shotgun'):\n sound = pygame.mixer.Sound(Laser.sounds['shotgun'])\n else:\n sound = pygame.mixer.Sound(Laser.sounds['automatic'])\n sound.play()\n self.direction = direction\n self.velx = Laser.velocity[self.direction]['x']\n self.vely = Laser.velocity[self.direction]['y']\n\n if self.direction == 'n':\n south = pygame.transform.rotate(Laser.imgs[self.type], 90) # CCW\n self.img = pygame.transform.flip(south, False, True)\n\n if self.direction == 's':\n self.img = pygame.transform.rotate(Laser.imgs[self.type], 90) # CCW\n\n if self.direction == 'e':\n self.img = pygame.transform.flip(Laser.imgs[self.type], True, False)\n\n if self.direction == 'w':\n self.img = Laser.imgs[self.type]\n\n Laser.List.append(self)", "def move_ray(self, ray):\n\n # look to the next spot in the ray's trajectory\n next_coordinates = 
ray.get_next_location()\n next_location = self._board.get_board_square(next_coordinates)\n\n # check for a collisition - return if it occurs\n if ray.check_for_collision(next_location):\n return\n\n # if we didn't collide as we moved we need to look to check our\n # diagonals for atoms\n ccw_diag_coordinates, cw_diag_coordinates = ray.get_diagonals()\n\n ccw_diagonal = self._board.get_board_square(ccw_diag_coordinates)\n cw_diagonal = self._board.get_board_square(cw_diag_coordinates)\n\n if ccw_diagonal.is_atom() or cw_diagonal.is_atom():\n\n # If we're on our first move and the immediately diagonals contain an atom we have a reflection\n if ray.get_current_location() == ray.get_origin_location():\n\n terminal_square = self._board.get_board_square(\n ray.get_current_location())\n\n # let's the ray know it's finished and the square that it's an endpoint\n # self.end_ray(ray, terminal_square)\n return ray.record_edge_collision(terminal_square)\n\n # otherwise they cause a bend in the path\n else:\n # we have to calculate our trajectory based on the pull\n # of the atoms in our path\n ray.recalculate_trajectory(ccw_diagonal, cw_diagonal)\n\n # get the coordinates of the next location in our new trajectory\n next_coordinates = ray.get_next_location()\n\n # determine the next coordinate will result in a collision - return if it would\n if ray.check_for_collision(\n self._board.get_board_square(next_coordinates)):\n return\n\n # move the ray to the next step forward in its current trajectory\n ray.set_current_location(next_coordinates)\n\n # finally, recursively call our current function from the next step in its path.\n self.move_ray(ray)", "def ray(self):\n return self._ray", "def shoot(self):\n self.assertIsInstance(gun(3).shoot(), 2)\n self.assertIsInstance(gun(10).shoot(), 9)", "def shooting_target(agent):\n ball = agent.info.ball\n car = agent.info.my_car\n car_to_ball = ball.location - car.location\n backline_intersect = line_backline_intersect(\n agent.their_goal.center[1], vec2(car.location), vec2(car_to_ball))\n if abs(backline_intersect) < 700:\n goal_to_ball = normalize(car.location - ball.location)\n error = 0\n else:\n # Right of the ball\n if -500 > backline_intersect:\n target = agent.their_goal.corners[3] + vec3(400, 0, 0)\n # Left of the ball\n elif backline_intersect > 500:\n target = agent.their_goal.corners[2] - vec3(400, 0, 0)\n goal_to_ball = normalize(ball.location - target)\n # Subtract the goal to car vector\n difference = goal_to_ball - normalize(car.location - target)\n error = cap(abs(difference[0]) + abs(difference[1]), 0, 5)\n\n goal_to_ball_2d = vec2(goal_to_ball[0], goal_to_ball[1])\n test_vector_2d = dot(rotation(0.5 * math.pi), goal_to_ball_2d)\n test_vector = vec3(test_vector_2d[0], test_vector_2d[1], 0)\n\n distance = cap((40 + distance_2d(ball.location, car.location) * (error ** 2)) / 1.8, 0, 4000)\n location = ball.location + vec3((goal_to_ball[0] * distance), goal_to_ball[1] * distance, 0)\n\n # this adjusts the target based on the ball velocity perpendicular\n # to the direction we're trying to hit it\n multiplier = cap(distance_2d(car.location, location) / 1500, 0, 2)\n distance_modifier = cap(dot(test_vector, ball.velocity) * multiplier, -1000, 1000)\n location += vec3(\n test_vector[0] * distance_modifier, test_vector[1] * distance_modifier, 0)\n\n # another target adjustment that applies if the ball is close to the wall\n extra = 3850 - abs(location[0])\n if extra < 0:\n location[0] = cap(location[0], -3850, 3850)\n location[1] = location[1] + 
(-sign(agent.team) * cap(extra, -800, 800))\n return location", "def random_lookat_ray(goal, radius, variance, fov):\n theta1 = 2.*np.pi*np.random.uniform(-fov, fov)\n theta2 = np.arccos(1 - np.random.uniform(0, fov)**2)\n r = radius + variance*np.random.uniform(0,1.)\n x = r*np.cos(theta1)*np.sin(theta2)\n y = r*np.sin(theta1)*np.sin(theta2)\n z = r*np.cos(theta2)\n R = goal[:3,:3]\n point = goal[:3,3] + np.dot(R, np.array([x,y,z]))\n # Find the direction\n direction = -np.dot(R, np.array([x,y,z]))\n direction = tr.unit_vector(direction)\n return orpy.Ray(point, direction)", "def compute_ray(self, box):\n if box[0, -1] > 0:\n warnings.warn('Box should have negative Z values.')\n\n size_x = np.linalg.norm(box[5] - box[1])\n size_y = np.linalg.norm(box[3] - box[1])\n size_z = np.linalg.norm(box[2] - box[1])\n size = np.asarray([size_x, size_y, size_z])\n box_o = Box.UNIT_BOX * size\n box_oh = np.ones((4, 9))\n box_oh[:3] = np.transpose(box_o)\n\n box_ch = np.ones((4, 9))\n box_ch[:3] = np.transpose(box)\n box_cht = np.transpose(box_ch)\n\n box_oct = np.matmul(box_oh, box_cht)\n box_cct_inv = np.linalg.inv(np.matmul(box_ch, box_cht))\n transform = np.matmul(box_oct, box_cct_inv)\n return transform[:3, 3:].reshape((3))", "def step(ray, points, nodes, renderer=None):\n container, to_node, surface_node = ray_status(ray, points, nodes)\n min_point = ray.position\n max_point = points[0]\n \n dist = distance_between(min_point, max_point)\n _ray = ray\n for (ray, decision) in trace_path(ray, container, dist):\n if renderer:\n renderer.add_ray_path([_ray, ray])\n _ray = ray\n yield ray, decision\n\n if to_node is None and container.parent is None:\n # Case: Hit world node; kill ray here.\n ray = replace(ray, is_alive=False)\n yield ray, Decision.KILL\n elif points_equal(ray.position, max_point):\n # Case: Hit surface\n # NB The ray argument of `trace_surface` *must* be a ray on the surface of the \n # node and the returned ray must *not* be on the node!\n before_ray = ray\n _ray = ray\n for ray, decision in trace_surface(ray, container, to_node, surface_node):\n if renderer:\n renderer.add_ray_path([_ray, ray])\n _ray = ray\n yield ray, decision\n # Avoid error checks in production\n if __debug__:\n local_ray = ray.representation(surface_node.root, surface_node)\n if surface_node.geometry.is_on_surface(local_ray.position):\n logger.warning(\"(before) pos: {}\".format(before_ray.position))\n logger.warning(\"(after) pos: {}\".format(ray.position))\n raise TraceError(\"After tracing a surface the ray cannot still be on the surface.\")", "def test_compute_pixel_ray_directions_vectorized_entireimage() -> None:\n fx = 10\n fy = 10\n\n img_w = 100\n img_h = 50\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n\n uv_list = []\n for u in range(img_w):\n for v in range(img_h):\n uv_list += [(u, v)]\n\n uv: NDArrayInt = np.array(uv_list)\n assert uv.shape == (img_w * img_h, 2)\n\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n # compare w/ vectorized, should be identical\n for i, ray_dir_vec in enumerate(ray_dirs):\n u, v = uv[i]\n ray_dir_nonvec = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n assert np.allclose(ray_dir_vec, ray_dir_nonvec)", "def actions(self, ship: SpaceShip, input_data: Dict[str, Tuple]) -> None:\n # ship.turn_rate = 180.0\n ship.thrust = ship.thrust_range[1]\n ship.shoot()", "def shooting(agent):\n ball = 
agent.info.ball\n car = agent.info.my_car\n our_goal = agent.my_goal.center\n target = shooting_target(agent)\n agent.drive.target = target\n distance = distance_2d(car.location, target)\n vf = velocity_forward(car)\n dodge_overshoot = distance < (abs(vf) + 500) * 1.4\n #agent.drive.speed = get_speed(agent, target)\n agent.drive.speed = 2200\n agent.drive.step(agent.info.time_delta)\n agent.controls = agent.drive.controls\n if agent.defending:\n agent.step = Step.Defending\n elif should_dodge(agent):\n agent.step = Step.Dodge\n agent.dodge = Dodge(car)\n agent.dodge.duration = 0.1\n agent.dodge.target = ball.location\n elif agent.ball_bouncing and not (abs(ball.velocity[2]) < 100\n and sign(agent.team) * ball.velocity[1] < 0) and get_bounce(agent) is not None:\n agent.step = Step.Catching\n agent.drive.target = ball.location\n agent.drive.speed = 1399\n elif vf < -900 and (not dodge_overshoot or distance < 600):\n agent.step = Step.HalfFlip\n agent.halfflip = HalfFlip(car)\n elif not dodge_overshoot and car.location[2] < 80 and\\\n (agent.drive.speed > abs(vf) + 300 and 1200 < abs(vf) < 2000 and car.boost <= 15):\n # Dodge towards the target for speed\n agent.step = Step.Dodge\n agent.dodge = Dodge(car)\n agent.dodge.duration = 0.1\n agent.dodge.target = target", "def rayleigh(v0):\r\n # Need to sample the angle theta from the phase function\r\n loop_condition = True\r\n while loop_condition:\r\n eps = random.random()*np.pi # Sampled x coordinate from 0 to pi\r\n eta = random.random()*(3/4)*2 # Sampled y coordinate from 0 to max of Rayleigh phase function for unpolarised light\r\n if eta < 3/4*(1 + (np.cos(eps))**2): # Checks if eta is less than the Rayleigh phase function using the angle eps\r\n loop_condition = False\r\n \r\n # Get a new direction vector for the photon\r\n v = scattering_direction(v0, eps)\r\n return v", "def clashTest(self, px, py, pz, rad):\r\n radSq = rad**2\r\n # adjust for map not set at origin\r\n px -= self.unif[0]\r\n py -= self.unif[1]\r\n pz -= self.unif[2]\r\n ht = self.height/255\r\n halfw = self.width/2.0\r\n halfd = self.depth/2.0\r\n dx = self.width/self.ix\r\n dz = self.depth/self.iy\r\n\r\n # work out x and z ranges to check, x0 etc correspond with vertex indices in grid\r\n x0 = int(math.floor((halfw + px - rad)/dx + 0.5)) - 1\r\n if x0 < 0: x0 = 0\r\n x1 = int(math.floor((halfw + px + rad)/dx + 0.5)) + 1\r\n if x1 > self.ix-1: x1 = self.ix-1\r\n z0 = int(math.floor((halfd + pz - rad)/dz + 0.5)) - 1\r\n if z0 < 0: z0 = 0\r\n z1 = int(math.floor((halfd + pz + rad)/dz + 0.5)) + 1\r\n if z1 > self.iy-1: z1 = self.iy-1\r\n\r\n # go through grid around px, pz\r\n minDist, minLoc = 1000000, (0, 0)\r\n for i in xrange(x0+1, x1):\r\n for j in xrange(z0+1, z1):\r\n # use the locations stored in the one dimensional vertices matrix\r\n #generated in __init__. 
3 values for each location\r\n p = j*self.ix + i # pointer to the start of xyz for i,j in the vertices array\r\n p1 = j*self.ix + i - 1 # pointer to the start of xyz for i-1,j\r\n p2 = (j-1)*self.ix + i # pointer to the start of xyz for i, j-1\r\n vertp = self.buf[0].vertices[p]\r\n normp = self.buf[0].normals[p]\r\n # work out distance squared from this vertex to the point\r\n distSq = (px - vertp[0])**2 + (py - vertp[1])**2 + (pz - vertp[2])**2\r\n if distSq < minDist: # this vertex is nearest so keep a record\r\n minDist = distSq\r\n minLoc = (i, j)\r\n #now find the distance between the point and the plane perpendicular\r\n #to the normal at this vertex\r\n pDist = dot([px - vertp[0], py - vertp[1], pz - vertp[2]],\r\n [-normp[0], -normp[1], -normp[2]])\r\n #and the position where the normal from point crosses the plane\r\n xIsect = px - normp[0]*pDist\r\n zIsect = pz - normp[2]*pDist\r\n\r\n #if the intersection point is in this rectangle then the x,z values\r\n #will lie between edges\r\n if xIsect > self.buf[0].vertices[p1][0] and \\\r\n xIsect < self.buf[0].vertices[p][0] and \\\r\n zIsect > self.buf[0].vertices[p2][2] and \\\r\n zIsect < self.buf[0].vertices[p][2]:\r\n pDistSq = pDist**2\r\n # finally if the perpendicular distance is less than the nearest so far\r\n #keep a record\r\n if pDistSq < minDist:\r\n minDist = pDistSq\r\n minLoc = (i,j)\r\n\r\n gLevel = self.calcHeight(px, pz) #check it hasn't tunnelled through by going fast\r\n if gLevel > (py-rad):\r\n minDist = py - gLevel\r\n minLoc = (int((x0+x1)/2), int((z0+z1)/2))\r\n\r\n if minDist <= radSq: #i.e. near enough to clash so return normal\r\n p = minLoc[1]*self.ix + minLoc[0]\r\n normp = self.buf[0].normals[p]\r\n if minDist < 0:\r\n jump = rad - minDist\r\n else:\r\n jump = 0\r\n return(True, normp[0], normp[1], normp[2], jump)\r\n else:\r\n return (False, 0, 0, 0, 0)", "def main_ray_cast(self, context, event):\r\n # get the context arguments\r\n MPM = bpy.context.window_manager.MPM\r\n scene = context.scene\r\n region = context.region\r\n rv3d = context.region_data\r\n coord = event.mouse_region_x, event.mouse_region_y\r\n \r\n # get the ray from the viewport and mouse\r\n view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)\r\n ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)\r\n \r\n ray_target = ray_origin + view_vector\r\n \r\n def visible_objects_and_duplis():\r\n \"\"\"Loop over (object, matrix) pairs (mesh only)\"\"\"\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()\r\n \r\n def obj_ray_cast(obj, matrix):\r\n \"\"\"Wrapper for ray casting that moves the ray into object space\"\"\"\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None\r\n \r\n # cast rays and find the closest object\r\n best_length_squared = -1.0\r\n best_obj = None\r\n \r\n for obj, matrix in visible_objects_and_duplis():\r\n if 
obj.type == 'MESH':\r\n hit, normal, face_index = obj_ray_cast(obj, matrix)\r\n if hit is not None:\r\n hit_world = matrix * hit\r\n length_squared = (hit_world - ray_origin).length_squared\r\n if best_obj is None or length_squared < best_length_squared:\r\n best_length_squared = length_squared\r\n best_obj = obj\r\n \r\n if best_obj is not None:\r\n if self.on_curve:\r\n if best_obj != bpy.context.active_object:\r\n if self.choose_start:\r\n bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap = None if bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap == best_obj else best_obj\r\n self.choose_start = False\r\n \r\n if self.choose_end:\r\n bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap = None if bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap == best_obj else best_obj\r\n self.choose_end = False\r\n \r\n if self.choose_profile:\r\n \r\n curve = bpy.context.active_object.modifiers[\"Curve\"].object\r\n start = bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap if bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap else \"\"\r\n end = bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap if bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap else \"\"\r\n \r\n bpy.ops.object.modifier_remove(modifier = \"Array_on_curve\")\r\n bpy.ops.object.modifier_remove(modifier = \"Curve\")\r\n bpy.context.active_object.select = False\r\n best_obj.select = True\r\n bpy.context.scene.objects.active = best_obj\r\n best_obj.modifiers.new(\"Array_on_curve\", 'ARRAY')\r\n MPM.array_name = \"Array_on_curve\"\r\n best_obj.modifiers[\"Array_on_curve\"].relative_offset_displace[self.axis_value] = 1\r\n for i in range(3):\r\n if i != self.axis_value:\r\n best_obj.modifiers[\"Array_on_curve\"].relative_offset_displace[i]=0\r\n best_obj.modifiers[\"Array_on_curve\"].fit_type = 'FIT_CURVE'\r\n best_obj.modifiers[\"Array_on_curve\"].curve = curve\r\n best_obj.modifiers[\"Array_on_curve\"].use_merge_vertices = True\r\n if start:\r\n best_obj.modifiers[\"Array_on_curve\"].start_cap = start if start != best_obj else None\r\n \r\n if end:\r\n best_obj.modifiers[\"Array_on_curve\"].end_cap = end if end != best_obj else None\r\n \r\n # setup the curve modifier\r\n best_obj.modifiers.new(\"Curve\", 'CURVE')\r\n best_obj.modifiers[\"Curve\"].object = curve\r\n self.setup_deform_axis(best_obj.modifiers, self.axis_value)\r\n \r\n self.choose_profile = False", "def shoot(self, point: Point) -> Tuple[bool, bool, ShipType]:\n\n # Shot off board\n if not self.point_in_board(point):\n raise InvalidShotException(f'{point} is not on the board')\n\n # Point has already been shot\n elif self.point_is_shot(point):\n raise InvalidShotException(f'{point} has already been shot')\n\n else:\n self.shot_locations.add(point)\n is_hit = True if point in self.all_ship_locations else False\n is_sunk = False\n ship_sunk = None\n\n if is_hit:\n # find out which one of the ships was shot\n for k, v in self.individual_ship_locations.items():\n # if v was the ship that was shot\n if point in v:\n # remove the point from v\n v.remove(point)\n if len(v) == 0:\n is_sunk = True\n ship_sunk = k\n\n return is_hit, is_sunk, ship_sunk", "def shoot(self):\n shots = Shooting(self.rect.centerx, self.rect.bottom)\n # Adding the shots to sprite lists created\n all_sprites_list.add(shots)\n shooting_list.add(shots)", "def clashTest(self, px, py, pz, rad):\n radSq = rad**2\n # adjust for map not set at origin\n px -= self.unif[0]\n py -= self.unif[1]\n pz 
-= self.unif[2]\n ht = self.height/255\n halfw = self.width/2.0\n halfd = self.depth/2.0\n dx = self.width/self.ix\n dz = self.depth/self.iy\n\n # work out x and z ranges to check, x0 etc correspond with vertex indices in grid\n x0 = int(math.floor((halfw + px - rad)/dx + 0.5)) - 1\n if x0 < 0: x0 = 0\n x1 = int(math.floor((halfw + px + rad)/dx + 0.5)) + 1\n if x1 > self.ix-1: x1 = self.ix-1\n z0 = int(math.floor((halfd + pz - rad)/dz + 0.5)) - 1\n if z0 < 0: z0 = 0\n z1 = int(math.floor((halfd + pz + rad)/dz + 0.5)) + 1\n if z1 > self.iy-1: z1 = self.iy-1\n\n # go through grid around px, pz\n minDist, minLoc = 1000000, (0, 0)\n for i in xrange(x0+1, x1):\n for j in xrange(z0+1, z1):\n # use the locations stored in the one dimensional vertices matrix\n #generated in __init__. 3 values for each location\n p = j*self.ix + i # pointer to the start of xyz for i,j in the vertices array\n p1 = j*self.ix + i - 1 # pointer to the start of xyz for i-1,j\n p2 = (j-1)*self.ix + i # pointer to the start of xyz for i, j-1\n vertp = self.buf[0].vertices[p]\n normp = self.buf[0].normals[p]\n # work out distance squared from this vertex to the point\n distSq = (px - vertp[0])**2 + (py - vertp[1])**2 + (pz - vertp[2])**2\n if distSq < minDist: # this vertex is nearest so keep a record\n minDist = distSq\n minLoc = (i, j)\n #now find the distance between the point and the plane perpendicular\n #to the normal at this vertex\n pDist = dot([px - vertp[0], py - vertp[1], pz - vertp[2]],\n [-normp[0], -normp[1], -normp[2]])\n #and the position where the normal from point crosses the plane\n xIsect = px - normp[0]*pDist\n zIsect = pz - normp[2]*pDist\n\n #if the intersection point is in this rectangle then the x,z values\n #will lie between edges\n if xIsect > self.buf[0].vertices[p1][0] and \\\n xIsect < self.buf[0].vertices[p][0] and \\\n zIsect > self.buf[0].vertices[p2][2] and \\\n zIsect < self.buf[0].vertices[p][2]:\n pDistSq = pDist**2\n # finally if the perpendicular distance is less than the nearest so far\n #keep a record\n if pDistSq < minDist:\n minDist = pDistSq\n minLoc = (i,j)\n\n gLevel = self.calcHeight(px, pz) #check it hasn't tunnelled through by going fast\n if gLevel > (py-rad):\n minDist = py - gLevel\n minLoc = (int((x0+x1)/2), int((z0+z1)/2))\n\n if minDist <= radSq: #i.e. near enough to clash so return normal\n p = minLoc[1]*self.ix + minLoc[0]\n normp = self.buf[0].normals[p]\n if minDist < 0:\n jump = rad - minDist\n else:\n jump = 0\n return(True, normp[0], normp[1], normp[2], jump)\n else:\n return (False, 0, 0, 0, 0)", "def draw_shot(self, dist, stepsize):\r\n if stepsize < 1:\r\n return dist\r\n \r\n self.image = self.base_image.copy()\r\n shoot_to = (self.rect.w/2 + math.cos(self.direction)*dist,\r\n self.rect.h/2 + math.sin(self.direction)*dist)\r\n \r\n pygame.draw.line(self.image, self.shot_color, (self.rect.w/2,self.rect.h/2), shoot_to, 1)\r\n #the third argument is a threshold value. 
Apparently, it doesn't work without it.\r\n self.mask = pygame.mask.from_threshold(self.image, self.shot_color, (1,1,1))\r\n\r\n if(self.world.visible_objects(self, self.world.walls)):\r\n return self.draw_shot(dist-stepsize, stepsize/2)\r\n else:\r\n return self.draw_shot(dist+stepsize, stepsize/2)", "def render_solids(display_surface):\r\n\r\n global depth_buffer\r\n if depth_buffer is None:\r\n depth_buffer = [0] * display_surface.get_width()\r\n\r\n map_width = len(main.game_map)\r\n map_height = len(main.game_map[0])\r\n for x in range(display_surface.get_width()):\r\n # figure out the ray angle\r\n ray_angle = (settings.playerA - settings.FOV / 2) + (x / display_surface.get_width()) * settings.FOV\r\n\r\n # initialize flags\r\n dist_player_to_wall = 0\r\n hit_wall = False\r\n boundary = False\r\n hit_portal = False\r\n hit_enemy = False\r\n last_sector = (-1, -1)\r\n\r\n eye_x = math.sin(ray_angle)\r\n eye_y = math.cos(ray_angle)\r\n\r\n sector_offset_x = 0\r\n sector_offset_y = 0\r\n current_ray_step = settings.RAY_STEP\r\n\r\n # move ray until it hits something\r\n while not hit_wall and not hit_portal and not hit_enemy:\r\n # ray acceleration\r\n dist_player_to_wall += current_ray_step\r\n if settings.ray_acceleration:\r\n current_ray_step += settings.RAY_STEP / 20\r\n\r\n # figure out roughly where the ray is at\r\n test_x = int(sector_offset_x + settings.playerX + eye_x * dist_player_to_wall)\r\n test_y = int(sector_offset_y + settings.playerY + eye_y * dist_player_to_wall)\r\n\r\n # check if the ray intersects with anything on the map\r\n if 0 > test_x >= map_width or 0 > test_y >= map_height:\r\n hit_wall = True\r\n dist_player_to_wall = settings.DEPTH\r\n\r\n elif dist_player_to_wall > settings.DEPTH:\r\n hit_wall = True\r\n dist_player_to_wall = settings.DEPTH\r\n\r\n elif (test_x, test_y) in [_.location for _ in settings.sectors]:\r\n sector = [_ for _ in settings.sectors if _.location == (test_x, test_y)][0]\r\n if sector.location != last_sector:\r\n sector_offset_x -= sector.location[0] - sector.vector[0]\r\n sector_offset_y -= sector.location[1] - sector.vector[1]\r\n last_sector = sector.vector\r\n\r\n elif main.game_map[test_y][test_x] == \"#\":\r\n hit_wall = True\r\n\r\n # check if corner\r\n vector = []\r\n # all 4 corners\r\n for tx in range(2):\r\n for ty in range(2):\r\n vy = (test_y - sector_offset_y) + ty - settings.playerY\r\n vx = (test_x - sector_offset_x) + tx - settings.playerX\r\n d = math.sqrt(vx*vx + vy*vy)\r\n dot = (eye_x * vx / d) + (eye_y * vy / d)\r\n vector.append((d, dot))\r\n\r\n # sort vector by closest distance\r\n def sort_vector(e):\r\n # sorting function\r\n return e[0]\r\n vector.sort(key=sort_vector)\r\n\r\n if math.acos(vector[0][1]) < settings.BOUND:\r\n boundary = True\r\n if math.acos(vector[1][1]) < settings.BOUND:\r\n boundary = True\r\n\r\n elif main.game_map[test_y][test_x] == \"@\":\r\n hit_portal = True\r\n\r\n elif main.game_map[test_y][test_x] == \"e\":\r\n hit_enemy = True\r\n\r\n ceil = display_surface.get_height() / 2 - display_surface.get_height() / dist_player_to_wall\r\n flor = display_surface.get_height() - ceil\r\n\r\n # add solid to depth buffer, not including sectors\r\n depth_buffer[x] = dist_player_to_wall\r\n\r\n # calculate colors based off distance and fade factor\r\n color = int(settings.MAX_FADE - (dist_player_to_wall / settings.DEPTH * settings.MAX_FADE))\r\n wall_shade = (color, color, color)\r\n\r\n # if hit a boundary\r\n if boundary:\r\n wall_shade = (0, 0, 0)\r\n\r\n if hit_portal:\r\n wall_shade = (255, 
0, 0)\r\n\r\n if hit_enemy:\r\n wall_shade = (0, 255, 0)\r\n\r\n # optimization for drawing lines instead of by pixel\r\n pygame.draw.line(display_surface, (0, 0, 0), (x, 0), (x, ceil))\r\n pygame.draw.line(display_surface, wall_shade, (x, ceil), (x, flor))\r\n pygame.draw.line(display_surface, (0, 0, 255), (x, flor), (x, display_surface.get_height()))", "def ray_trace(x, y, poly):\n\n @vectorize([bool_(float64, float64)])\n def ray(x, y):\n # where xy is a coordinate\n n = len(poly)\n inside = False\n p2x = 0.0\n p2y = 0.0\n xints = 0.0\n p1x, p1y = poly[0]\n for i in range(n + 1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xints:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside\n\n return ray(x, y)", "def shoot(self):\n theta = self.theta()\n pt = self.pt()\n p = pt / math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n pz = p * math.cos(theta)\n m = self.mass()\n e = math.sqrt( p**2 + m**2 )\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def trace(self):\n\n \n assert self.scene != None, \"The photon's scene variable is not set.\"\n \n intersection_points, intersection_objects = self.scene.intersection(self.ray)\n\n \"\"\"\n #DIAGNOSTICS\n print \"\\nnew\\n\"\n print self.position, self.direction, \"\\n\"\n print intersection_points, \"\\n\"\n for i in range(0, len(intersection_objects)):\n print \"Object: \", intersection_objects[i].name, \" - Intersection: \", intersection_points[i]\n \"\"\"\n \n assert intersection_points != None, \"The ray must intersect with something in the scene to be traced.\"\n \n if self.container is None:\n self.container = self.scene.container(self)\n assert self.container != None, \"Container of ray cannot be found.\"\n \n #import pdb; pdb.set_trace()\n #import pudb; pudb.set_trace()\n intersection_points, intersection_objects = Scene.sort(intersection_points, intersection_objects, self, container=self.container, show_log=self.show_log)\n \n # find current intersection point and object -- should be zero if the list is sorted!\n intersection = closest_point(self.position, intersection_points)\n for i in range(0,len(intersection_points)):\n if list(intersection_points[i]) == list(intersection):\n index = i\n break\n \n #import pdb; pdb.set_trace()\n intersection_object = intersection_objects[index]\n assert intersection_object != None, \"No intersection points can be found with the scene.\"\n \n \n \"\"\"\n #DIAGNOSTICS\n print \"\\n\", intersection, \"\\n\"\n print intersection_object.name \n \"\"\" \n \n \n # Reached scene boundaries?\n if intersection_object is self.scene.bounds:\n self.active = False\n self.previous_container = self.container\n self.container = self.scene.bounds\n return self\n\n\n # Reached a RayBin (kind of perfect absorber)?\n if isinstance(intersection_object, RayBin):\n self.active = False\n self.previous_container = self.container\n self.container = self.scene.bounds\n return self\n \n \n # Here we trace the ray through a Coating\n if isinstance(self.container, Coating):\n normal = intersection_object.shape.surface_normal(self.ray)\n self = self.container.material.trace(self, normal, separation(self.position, intersection))\n self.exit_device = self.container\n self.previous_container = self.container\n self.container = self.scene.container(self)\n return self\n \n \n # Here we determine if the Coating has been hit\n if 
isinstance(intersection_object, Coating) and intersection_object.shape.on_surface(self.position):\n self.previous_container = self.container\n self.container = intersection_object\n self.exit_device = intersection_object\n assert self.exit_device != self.scene.bounds, \"The object the ray hit before hitting the bounds is the bounds, this can't be right.\"\n return self\n \n \n # Here we trace the ray through a Material\n self = self.container.material.trace(self, separation(self.position, intersection))\n \n \n # Lost in material?\n # Photon has been re-absorbed but NOT re-emitted, i.e. is inactive\n if not self.active:\n #01/04/10: Unification --> Next two lines came from older Trace version\n self.exit_device = self.container\n self.exit_material = self.container.material\n return self \n \n # Reaches interface\n # Photon has been re-absorbed AND re-emitted, i.e. is still active\n ray_on_surface = intersection_object.shape.on_surface(self.position)\n if not ray_on_surface: \n self.exit_device = self.container\n return self\n \n # Ray has reached a surface of some description, increment the intersection counter\n self.intersection_counter += 1\n \n # If we reach an reflective material then we don't need to follow \n # this logic we can just return\n if ray_on_surface and isinstance(intersection_object, Coating):\n self.previous_container = self.container\n self.container = intersection_object\n self.exit_device = intersection_object\n return self\n \n # KARLG NEW CODE HERE\n #import pudb; pudb.set_trace()\n if isinstance(intersection_object, Face):\n self.exit_device = intersection_object\n \n # Now change the properties of the photon accoring to what your surface does\n random_number = np.random.random_sample()\n if random_number < intersection_object.reflectivity:\n # Reflected\n self.direction = reflect_vector(intersection_object.shape.surface_normal(self.ray), self.direction)\n elif random_number < intersection_object.reflectivity + intersection_object.transmittance:\n # Transmitted\n pass\n else:\n # Loss\n self.active = False\n return self\n \n # Fresnel details\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n \n # material-air or material-material interface\n # Are there duplicates intersection_points that are equal to the ray position?\n same_pt_indices = []\n for i in range(0,len(intersection_points)):\n if cmp_points(self.position, intersection_points[i]):\n same_pt_indices.append(i)\n assert len(same_pt_indices) < 3, \"An interface can only have 2 or 0 common intersection points.\"\n \n initialised_internally = None\n \n if len(same_pt_indices) == 2:\n intersection_object = self.container\n \n if self.container == intersection_object:\n \n # hitting internal interface -- for the case we are at an material-material interface (i.e. 
not travelling through air)\n initialised_internally = True\n \n if len(same_pt_indices) == 2:\n \n for obj in intersection_objects:\n if obj.shape.on_surface(intersection) and obj != self.container:\n #if obj != self.container:\n next_containing_object = obj\n \n \n else:\n # hitting internal interface -- for the case we are not at an interface\n next_containing_object = self.scene.container(self)\n \n assert self.container != next_containing_object, \"The current container cannot also be the next containing object after the ray is propagated.\"\n \n # Fresnel details\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n if self.polarisation == None:\n reflection = fresnel_reflection(rads, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n else:\n reflection = fresnel_reflection_with_polarisation(normal, self.direction, self.polarisation, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n \n else:\n # hitting external interface\n initialised_internally = False \n \n \n if len(same_pt_indices) == 2:\n for obj in intersection_objects:\n if obj != self.container:\n intersection_object = obj\n next_containing_object = obj\n else:\n next_containing_object = intersection_object\n \n #import pdb; pdb.set_trace()\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n if self.polarisation == None:\n reflection = fresnel_reflection(rads, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n else:\n reflection = fresnel_reflection_with_polarisation(normal, self.direction, self.polarisation, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n \n if isinstance(next_containing_object, Collector):\n # If the photon hits an interface with e.g. a cell index-matched to it, then no reflection is to occur at this interface.\n reflection = 0.\n \n if np.random.random_sample() < reflection:\n # photon is reflected\n before = copy(self.direction)\n self.direction = reflect_vector(normal, self.direction)\n ang = angle(before, self.direction)\n \n if self.polarisation != None:\n \n #import pdb; pdb.set_trace()\n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n \n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. #1: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(np.degrees(angle(self.direction, self.polarisation)))\n \n self.propagate = False\n self.exit_device = self.container\n \n # invert polaristaion if n1 < n2\n if self.container.material.refractive_index < next_containing_object.material.refractive_index:\n \n if self.polarisation != None:\n \n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation * -1.\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n \n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. 
#2: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n if self.exit_device == self.scene.bounds or self.exit_device == None:\n self.exit_device = intersection_object\n assert self.exit_device != self.scene.bounds, \"The object the ray hit before hitting the bounds is the bounds, this can't be right\"\n return self\n else:\n # photon is refracted through interface\n self.propagate = True\n before = copy(self.direction)\n ang = angle(before, self.direction)\n if initialised_internally:\n if not isinstance(next_containing_object, Collector):\n self.direction = fresnel_refraction(normal, self.direction, self.container.material.refractive_index, next_containing_object.material.refractive_index )\n \n if self.polarisation != None:\n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. #3: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n self.exit_device = self.container #LSC is the exit_device\n self.previous_container = self.container\n self.container = next_containing_object #Bounds is the container\n return self\n else:\n if not isinstance(next_containing_object, Collector):\n self.direction = fresnel_refraction(normal, self.direction, self.container.material.refractive_index, intersection_object.material.refractive_index )\n \n if self.polarisation != None:\n \n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n\n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. 
#4: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n # DJF 13.5.2010: This was crashing the statisical collection because it meant that an incident ray, hitting and transmitted, then lost would have bounds as the exit_device.\n #self.exit_device = self.container\n self.exit_device = intersection_object\n self.previous_container = self.container\n self.container = intersection_object\n return self", "def step(self, crowd):\n\n for boid in crowd:\n random_int = random.randint(0, 5)\n\n # if random_int > 4:\n # random_int = random.randint(0, 5)\n # if random_int > 4:\n # for i in range (1, 500):\n # goalX, goalY = self.goals[boid.goalNr]\n # x, y = boid.position\n\n # if (goalX + 10 >= x >= goalX - 10) and (goalY + 10 >= y >= goalY - 10):\n # boid.reached_goal(goalX + 10, goalY + 10)\n\n # dx = random.randint(0, self.width) - x\n # dy = random.randint(0, self.height) - y\n\n # # Unit vector in the same direction\n # distance = math.sqrt(dx * dx + dy * dy)\n # dx /= distance\n # dy /= distance\n\n # # And now we move:\n # x += dx\n # y += dy\n\n # boid.set_goal(dx, dy)\n\n # boid.position += boid.velocity\n #else:\n # boid.position += boid.velocity\n \n # Vector from me to cursor\n\n\n goalX, goalY = self.goals[boid.goalNr]\n x, y = boid.position\n\n if (goalX + 10 >= x >= goalX - 10) and (goalY + 10 >= y >= goalY - 10):\n boid.reached_goal(goalX + 10, goalY + 10)\n\n else:\n dx = goalX - x\n dy = goalY - y\n\n # Unit vector in the same direction\n # distance = np.linalg.norm(dx * dx + dy * dy)\n distance = math.sqrt(dx * dx + dy * dy)\n dx /= distance\n dy /= distance\n\n # And now we move:\n x += dx\n y += dy\n\n boid.set_goal(dx, dy)\n\n boid.position += boid.velocity", "def act(self, action):\n\n self.get_frame(int(self.t))\n\n self.position = np.zeros((self.grid_size, self.grid_size))\n\n self.position[0:2,:]= -1\n self.position[:,0:2] = -1\n self.position[-2:, :] = -1\n self.position[:, -2:] = -1\n\n self.position[self.x, self.y] = 1\n if action == 0:\n if self.x == self.grid_size-3:\n self.x = self.x-1\n else:\n self.x = self.x + 1\n elif action == 1:\n if self.x == 2:\n self.x = self.x+1\n else:\n self.x = self.x-1\n elif action == 2:\n if self.y == self.grid_size - 3:\n self.y = self.y - 1\n else:\n self.y = self.y + 1\n elif action == 3:\n if self.y == 2:\n self.y = self.y + 1\n else:\n self.y = self.y - 1\n else:\n RuntimeError('Error: action not recognized')\n\n self.t = self.t + 1\n reward = self.board[self.x, self.y]\n \n self.hunter_move()\n removals = []\n for i in range(len(self.h_x)):\n if self.x == self.h_x[i] and self.y == self.h_y[i]:\n reward -= 100\n removals.append(i)\n \n for i in sorted(removals, reverse=True):\n del self.h_x[i]\n del self.h_y[i]\n \n self.board[self.x, self.y] = 0\n self.board_with_hunters[:,:] = 0\n \n for i in range(len(self.h_x)):\n self.board_with_hunters[self.h_x[i],self.h_y[i]] = -100\n \n self.trajectory[self.x,self.y] = 1\n game_over = self.t > self.max_time\n state = np.concatenate((self.board.reshape(self.grid_size, self.grid_size,1),\n self.position.reshape(self.grid_size, self.grid_size,1),\n self.trajectory.reshape(self.grid_size, self.grid_size,1),\n self.board_with_hunters.reshape(self.grid_size, self.grid_size,1)),axis=2)\n state = state[self.x-2:self.x+3,self.y-2:self.y+3,:]\n\n return state, reward, game_over", "def move_draught(event):\n global red_turn\n if(red_turn == False):\n return\n draught = board.find_withtag(CURRENT)[0]\n 
board.coords(draught,event.x-click_offset[0],event.y-click_offset[1],event.x-click_offset[0]+board_divisions,event.y-click_offset[1]+board_divisions)", "def frusrum_ray(self, param_x, param_y):\n l, r, b, t, n, f = self.body.dim\n # convert normalized into near frustum space\n sm = ScaleMat(x=r - l, y=t - b)\n # .5 to compensate origin difference between OpenGL space and pane space\n offset = MoveMat(-.5, -.5, -n)\n frustum_point = sm * offset * Pnt(x=param_x, y=param_y, z=0)\n ray = gt.Ray([0, 0, 0], frustum_point.xyz)\n return self.tripod.plane.TM * ray", "def ray(self, pixel):\n # Ensure pixel is in homogenous coordinates\n if len(pixel) == 2:\n pixel = np.vstack((pixel, [1]))\n\n ray = project(self._camera.P_pinv, pixel.astype(np.float32))\n assert ray.shape == (4, 1)\n\n return self._camera.center, ray", "def shoot(self):\n e = self.energy()\n y = self.rap()\n sqrt_pt2_m2 = e / math.cosh(y)\n pz = sqrt_pt2_m2 * math.sinh(y)\n m = self.mass()\n pt = math.sqrt( sqrt_pt2_m2**2 - m**2 )\n phi = self.phi()\n px = pt * math.cos(phi);\n py = pt * math.sin(phi);\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def generate_ray(self, img_point):\n # TODO A5 copy your implementation from A4\n i = img_point[0]\n j = img_point[1]\n dist_vector = self.target - self.eye\n proj_dist = np.linalg.norm(dist_vector)\n height = 2 * proj_dist * np.tan(self.vfov / 2.0)\n width = self.aspect * height\n left = (-1) * width / 2.0\n bottom = (-1) * height / 2.0\n u = i * width + left\n v = j * height + bottom\n ray_origin = self.eye\n ray_direction = ((-1) * proj_dist * self.w) + u * self.u + v * self.v\n return Ray(ray_origin, ray_direction)", "def analysis_function_raytrace(system, wavelength_idx, config, spaxels_per_slice, surface, ignore_vignetting):\n start0 = time()\n\n # Set Current Configuration\n system.MCE.SetCurrentConfiguration(config)\n\n # Get the Field Points for that configuration\n sysField = system.SystemData.Fields\n N_fields = sysField.NumberOfFields\n N_waves = len(wavelength_idx)\n N_rays = N_waves * spaxels_per_slice\n\n fx_min, fy_min = sysField.GetField(1).X, sysField.GetField(1).Y\n fx_max, fy_max = sysField.GetField(N_fields).X, sysField.GetField(N_fields).Y\n\n # Watch Out! 
here we are assuming Rectangular Normalization\n X_MAX = np.max([np.abs(sysField.GetField(i + 1).X) for i in range(N_fields)])\n Y_MAX = np.max([np.abs(sysField.GetField(i + 1).Y) for i in range(N_fields)])\n\n # Normalized field coordinates (hx, hy)\n hx_min, hx_max = fx_min / X_MAX, fx_max / X_MAX\n hy_min, hy_max = fy_min / Y_MAX, fy_max / Y_MAX\n\n hx = np.linspace(hx_min, hx_max, spaxels_per_slice)\n hy = np.linspace(hy_min, hy_max, spaxels_per_slice)\n\n # The Field coordinates for the Object\n obj_xy = np.array([X_MAX * hx, Y_MAX * hy]).T\n foc_xy = np.empty((N_waves, spaxels_per_slice, 2))\n\n raytrace = system.Tools.OpenBatchRayTrace()\n normUnPolData = raytrace.CreateNormUnpol(N_rays, constants.RaysType_Real, surface)\n\n # Loop over the wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n\n # Loop over all Spaxels in the Slice\n for j_field, (h_x, h_y) in enumerate(zip(hx, hy)):\n\n # Add the ray to the RayTrace\n normUnPolData.AddRay(wave_idx, h_x, h_y, 0, 0, constants.OPDMode_None)\n\n # Run the RayTrace for the whole Slice\n CastTo(raytrace, 'ISystemTool').RunAndWaitForCompletion()\n # time_ray = time() - start\n # print(\"Time spent running Raytrace: %.3f sec\" % time_ray)\n\n # start = time()\n normUnPolData.StartReadingResults()\n\n # Retrieve the results for the operands and raytrace\n # Loop over the wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n # Loop over all Spaxels in the Slice\n for j_field, (h_x, h_y) in enumerate(zip(hx, hy)):\n\n output = normUnPolData.ReadNextResult()\n if ignore_vignetting == False:\n # We do care about vignetting\n if output[2] == 0 and output[3] == 0:\n x, y = output[4], output[5]\n foc_xy[i_wave, j_field, 0] = x\n foc_xy[i_wave, j_field, 1] = y\n\n elif output[2] == 0 and output[3] != 0:\n vignet_code = output[3]\n vignetting_surface = system.LDE.GetSurfaceAt(vignet_code).Comment\n print(\"\\nConfig #%d | Wavelength idx #%d\" % (config, wave_idx))\n fx, fy = h_x * X_MAX, h_y * Y_MAX\n print(\"Field point #%d : hx=%.4f hy=%.4f | fx=%.4f, fy=%.4f\" % (j_field + 1, h_x, h_y, fx, fy))\n print(\"Vignetting at surface #%d: %s\" % (vignet_code, vignetting_surface))\n else:\n # If we don't care about vignetting (rays falling outside the active area of the detector, for example)\n # we add the Raytrace results to the focal coordinates array no matter what\n if output[2] == 0:\n x, y = output[4], output[5]\n foc_xy[i_wave, j_field, 0] = x\n foc_xy[i_wave, j_field, 1] = y\n\n normUnPolData.ClearData()\n CastTo(raytrace, 'ISystemTool').Close()\n # time_res = time() - start\n # print(\"Time spent reading results: %.3f sec\" % time_res)\n\n return [obj_xy, foc_xy]", "def _trace_route(self, debug=False, time=False):\n self.radius = 2\n self.threshold = 1\n\n obstacles = []\n for vehicle in self._world.get_actors().filter('vehicle.*'):\n #print(vehicle.bounding_box)\n # draw Box\n bb_points = TestAgent._create_bb_points(vehicle)\n global_points= TestAgent._vehicle_to_world(bb_points, vehicle)\n global_points /= global_points[3,:]\n\n my_bb_points = TestAgent._create_bb_points(self._vehicle)\n my_global_points = TestAgent._vehicle_to_world(my_bb_points, self._vehicle)\n\n my_global_points /= my_global_points[3,:]\n dist = np.sqrt((my_global_points[0,2]-global_points[0,2])**2 + (my_global_points[1,2]-global_points[1,2])**2 + (my_global_points[2,2]-global_points[2,2])**2)\n\n if 0<dist:\n vehicle_box = [global_points[0,0],global_points[1,0],global_points[0,1],global_points[1,1]]\n obstacles.append(vehicle_box)\n 
print(f'vehicle box: {vehicle_box}')\n\n print('number of near obstacles: ', len(obstacles))\n if len(obstacles) == 0:\n self.obstacles = np.array([[-1,-1,-1,-1]]).astype(np.float32)\n self.num_obs = self.num_obs = np.array([0]).astype(np.int32)\n else:\n self.obstacles = np.array(obstacles).astype(np.float32)\n self.num_obs = self.num_obs = np.array([self.obstacles.shape[0]]).astype(np.int32)\n\n iter_parameters = {'start':self.start, 'goal':self.goal, 'radius':self.radius, 'threshold':self.threshold, 'obstacles':self.obstacles, 'num_obs':self.num_obs}\n \n start_timer = timer()\n route = self.gmt_planner.run_step(iter_parameters, iter_limit=1000, debug=debug, time=time)\n end_timer = timer()\n print(\"elapsed time: \", end_timer-start_timer) \n\n if time:\n self.time_df = pd.DataFrame(self.gmt_planner.time_data)\n \n\n # trace_route = []\n # for r in route:\n # wp = carla.Transform(carla.Location(self.states[r][0].item(), self.states[r][1].item(), 1.2), carla.Rotation(roll=0,pitch=0, yaw=(self.states[r][2]*180/np.pi).item()))\n # trace_route.append(wp)\n # draw_route(self._vehicle.get_world(), trace_route)\n\n index = len(route)-1\n trace_route = []\n for i in range(len(route)-1):\n wp = self._map.get_waypoint(carla.Location(self.states[route[index]][0].item(), self.states[route[index]][1].item(), 1.2)) # , carla.Rotation(roll=0,pitch=0, yaw=(self.states[r][2]*180/np.pi).item()\n trace_route.append((wp,-1))\n index -= 1\n\n return trace_route", "def propagate(self, time):\n return Ray(self.direction, self.position + time * self.direction)", "def step(self, action):\n # print(action)\n distances = self.agent.return_distances(self.agent.corners, self.agent.line_pos)\n\n left = distances[0]\n right = distances[1]\n self.agent.distances.append({\n 'left': left,\n 'right': right\n })\n reward = 0\n if action == 1:\n self.agent.angle -= 90\n if self.agent.angle < 0:\n self.agent.angle = 0\n self.agent.direction_history.append('left')\n self.reset_raycasts(self.agent.angle)\n self.render()\n if left > right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 2:\n self.agent.angle += 90\n if self.agent.angle >= 360:\n self.agent.angle = 0\n\n self.reset_raycasts(self.agent.angle)\n self.render()\n self.agent.direction_history.append('right')\n if left < right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 0:\n self.agent.direction_history.append('forward')\n if self.agent.angle >= 360: self.agent.angle == 0\n if self.agent.angle == 0 or self.agent.angle == 360:\n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n \n if left + right >= 50:\n reward += 5\n\n self.render()\n\n elif action == 3:\n self.agent.direction_history.append('reverse')\n if self.agent.angle == 0:\n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n 
self.render()\n \n if left + right <= 50:\n reward += 5\n\n \n else:\n reward -= 5\n\n if \"forward\" not in self.agent.direction_history[len(self.agent.direction_history)-6:len(self.agent.direction_history)-1]:\n reward -= 10\n\n \n info = {}\n if self.agent.check_collision():\n reward -= 10\n self.reset() \n self.agent.rewards.append({\n 'leftDistance': left,\n 'rightDistance': right,\n 'reward': reward,\n })\n self.render()\n print(f\"REWARD: {reward}\")\n # self.render()\n # print(self.agent.direction_history[-1])\n self.agent.rewards.append(reward)\n return np.array([left, right]), reward, False, info", "def shoot(self):\n eta = self.eta()\n theta = 2 * math.atan(math.exp(-eta));\n pt = self.pt()\n p = pt / math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n pz = p * math.cos(theta)\n m = self.mass()\n e = math.sqrt( p**2 + m**2 )\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def trace_path(ray, container_node, distance):\n if distance < 2*EPS_ZERO:\n # This is a very small step size. It could occur naturally, but it is much\n # more likely to be a bug\n raise TraceError(\"Distance is on the order of trace epsilon.\")\n\n # Trace the ray through the material\n local_ray = ray.representation(\n container_node.root, container_node\n )\n for (local_ray, decision) in container_node.geometry.material.trace_path(\n local_ray, container_node.geometry, distance):\n new_ray = local_ray.representation(\n container_node, container_node.root\n )\n yield new_ray, decision", "def shoot(uid, secret, enemy_uid, x, y):\n try:\n SERVER.validate_player(uid, secret)\n result = SERVER.shoot(uid, enemy_uid, x, y)\n except RoboBattleshipException as e:\n # if battle is over - archive it\n if e.code == 304:\n SERVER.archive_battle(uid, enemy_uid)\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to shoot at player '%s' at [%s,%s]\",\n enemy_uid, x, y)\n return JsonResponse.error(101)\n\n return JsonResponse.success({'result': result})", "def shootProjectile(configuration):\n leftData, rightData = getArrowData(configuration)\n return leftData[0] > 0 or rightData[0] > 0", "def rays(self):\n pixels = np.array([\n [u, v, 1.]\n for u, v in product(range(self.width), range(self.height))\n ], dtype=np.int32).T\n rays = project(self.camera.P_pinv, pixels)\n\n return self._camera.center, rays.T", "def hit_wall(self):\n if self.ball.x <= 0 or self.ball.x + self.ball.width > self.window.width:\n self.__dx = -self.__dx\n if self.ball.y <= 0:\n self.__dy = -self.__dy", "def shoot(self, x: float, y: float) -> None:\n bullet = entities.bullet_from_pool()\n if bullet is not None:\n bullet.active = True\n bullet.position.x = x\n bullet.position.y = y\n bullet.rotation = 270 * (math.pi / 180) # degrees to radians\n bullet.update()", "def check_shot_on_target(self, shot):\n # Defining a few variables to ease the reading\n # Here we define the x and y interval of the goal's segment\n x_min = min(self.s_pos.x, self.e_pos.x)\n x_max = max(self.s_pos.x, self.e_pos.x)\n\n y_min = min(self.s_pos.y, self.e_pos.y)\n y_max = max(self.s_pos.y, self.e_pos.y)\n\n # Shortening variables names\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the x coordinate of the opponent is in the goal's x interval\n if abs(shot.angle) == math.pi / 2:\n return self.is_in_interval(x_min, x_max, o_x)\n\n # If the angle = 0, pi or -pi, then 
tan(angle) is 0 which can lead to \n # undefined intersection points (if the goal is vertical for example)\n # although there is an intersection point\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the y coordinate of the opponent is in the goal's y interval\n if abs(shot.angle) == math.pi or shot.angle == 0:\n return self.is_in_interval(y_min, y_max, o_y)\n\n # Using tan the least amount of time possible, for this is a slow function\n tan_theta = math.tan(shot.angle)\n\n # Define the LE of the shot\n le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n le2 = None\n\n # If the goal is vertical, finding the intersection point\n # is not possible using the normal way\n #\n # That being said, unless the LE of the shot is vertical too (which it \n # isn't as it is checked before hand) there has to be an intersection point\n # This intersection must happen when at the x coodinate of the goal's segment\n # therefore, it is possible to compute the y coordinate of the intersection by\n # computing the application of the shot's LE on this ex coordinate\n #\n # Then, the resulting y is valid iff it is in the goal's segment interval\n if self.e_pos.x - self.s_pos.x == 0:\n y = le1.apply(self.e_pos.x)\n return self.is_in_interval(y_min, y_max, y)\n\n # The normal way of solving the intersection of these two LEs\n else:\n\n # Shortening variables by computing the coefficient of the goal's LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the lines are parallels (have the same coefficient) return False\n if math.tan(shot.angle) == ratio:\n return False\n\n # Defining the goal's LE\n le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # Finding the intersection point of the two LEs\n # If there isn't one, return False (but there should be one\n # given all the asserts we do before hand, this is just for completion sake)\n p_intersect = le1.intersection(le2)\n if p_intersect == None:\n return False\n\n # If the intersection point's abscissa is in the goal's x interval, then it is\n # a valid abstracted shot going \n return self.is_in_interval(x_min, x_max, p_intersect.x)", "def step(self):\n\n self.ball_x = self.ball_x + self.vel_x\n self.ball_y = self.ball_y + self.vel_y\n if self.ball_y >= 480:\n self.vel_y *= -1\n elif self.ball_y <= 0:\n self.vel_y *= -1\n if self.ball_x >= 640:\n self.vel_x *= -1\n elif self.ball_x <= 0:\n self.vel_x *= -1", "def tap_screen(self, element=None, config=None, x_cord=None, y_cord=None):\n if element and config:\n self.touch.tap(x=config[element]['x'],\n y=config[element]['y']).perform()\n elif x_cord:\n self.touch.tap(x=x_cord, y=y_cord).perform()\n else:\n LOGGER.error('Either element or co-ordinates must be given for tap!')\n time.sleep(2)", "def RayCast( self, p0, p1, callback, userdata):\n\t\tself.raycastUserData = userdata\n\t\tself.raycastCallback = callback\n\t\tself.CppRayCast.__call__(p0[0], p0[1], p0[2], p1[0], p1[1], p1[2])", "def HitTest(self, x, y):\r\n\r\n return 0", "def __call__(self):\n return self.shoot()", "def is_ray(self): \n return False", "def pick(self, start, direction, mat):\n new_mat = np.dot(\n np.dot(mat, self.translation_matrix),\n np.linalg.inv(self.scaling_matrix)\n )\n\n results = self.aabb.ray_hit(start, direction, mat)\n return results", "def _trace_ray(self, ray, depth=0, max_depth=5):\n\n color = Color()\n\n if depth >= max_depth:\n return color\n\n intersection = self._get_intersection(ray)\n if intersection is None:\n return color\n\n obj, dist = 
intersection\n intersection_pt = ray.point_at_dist(dist)\n surface_norm = obj.surface_norm(intersection_pt)\n\n # ambient light\n # color += obj.material.color * obj.material.ambient\n\n point_on_plane = ray.origin + dist*ray.direction\n imgx = point_on_plane.x\n imgy = np.sqrt(point_on_plane.y*point_on_plane.y + point_on_plane.z*point_on_plane.z)\n\n\n '''\n # Nearest Texel\n int_imgx = int(round(imgx))\n int_imgy = int(round(imgy))\n if int_imgx == 512:\n int_imgx = 511\n if int_imgy == 512:\n int_imgy = 511\n color += Color(img[int_imgx, int_imgy, 0], img[int_imgx, int_imgy, 1], img[int_imgx, int_imgy, 2])\n '''\n\n\n # Bilinearly Interpolated Texel\n ceilx = int(math.ceil(imgx))\n ceily = int(math.ceil(imgy))\n floorx = int(math.floor(imgx))\n floory = int(math.floor(imgy))\n if ceilx >= 512:\n ceilx = 511\n if ceily >= 512:\n ceily = 511\n if floorx >= 512:\n floorx = 511\n if floory >= 512:\n floory = 511\n interpolate_x1 = (ceilx - imgx) * (img[ceilx, ceily]) + (imgx - floorx) * (img[floorx, ceily])\n interpolate_x2 = (ceilx - imgx) * (img[ceilx, floory]) + (imgx - floorx) * (img[floorx, floory])\n interpolate_y = (ceily - imgy) * interpolate_x1 + (imgy - floory) * interpolate_x2\n color += Color(interpolate_y[0], interpolate_y[1], interpolate_y[2])\n # print color\n\n\n '''\n # lambert shading\n for light in self.lights:\n pt_to_light_vec = (light - intersection_pt).normalize()\n pt_to_light_ray = Ray(intersection_pt, pt_to_light_vec)\n if self._get_intersection(pt_to_light_ray) is None:\n lambert_intensity = surface_norm * pt_to_light_vec\n if lambert_intensity > 0:\n color += obj.material.color * obj.material.lambert * \\\n lambert_intensity\n\n \n # specular (reflective) light\n reflected_ray = Ray(\n intersection_pt, ray.direction.reflect(surface_norm).normalize())\n color += self._trace_ray(reflected_ray, depth + 1) * \\\n obj.material.specular\n '''\n return color", "def __init__(self, row, column):\n self._row = row\n self._column = column\n self._selected = False\n\n # Whether the square is an \"edge\" from which a ray can be launched\n self._edge = self.is_edge()\n\n # Whether or not an atom is placed on the square\n self._atom = False\n\n # Whether or not a ray has originated from the square\n # False if no - points towards Ray object if yes\n self._originating_ray = False\n\n # Whether or not a ray has terminated on the square. 
Defaults to False\n # Contains tuple of origin of the terminating ray if one exists.\n self._terminating_ray = False", "def ray_trace(self, max_iterations=25):\n if not bool(self.optical_system):\n return\n \n self.clear_ray_history() \n starting_rays = self.optical_system._amalgamated_sources.copy()\n for i in range(max_iterations):\n result = self.single_pass(starting_rays)\n \n if bool(result):\n starting_rays = result\n else:\n break", "def breaking_of_the_box(size = (10, 10), verbose = False):\n import numpy as np\n r, l, u, d = \"R\", \"L\", \"U\", \"D\" # initiating walkind directions\n np.random.seed(int(time.time()))\n \n # initiating field with walking directions\n field = np.random.randint(1, 5, size = (10, 10))\n field = np.where(field ==1, r, field)\n field = np.where(field =='2', l, field)\n field = np.where(field =='3', u, field)\n field = np.where(field =='4', d, field)\n\n i, j = 0, 0\n coordinates = []\n \n # iterating in a field\n while (i<field.shape[0] and i>-1) and (j<field.shape[1] and j>-1):\n prev_i,prev_j = i, j\n coordinates.append((i, j)) \n \n copy_field = field.copy()\n \n if field[i][j] == r:\n j+=1\n elif field[i][j] == l:\n j-=1\n elif field[i][j] == u:\n i-=1\n elif field[i][j] == d:\n i+=1\n copy_field[i][j] = \"X\"\n if verbose == True:\n print(copy_field, \"#\"*48, sep = \"\\n\") #printing step by step position of a player\n if (i, j) in coordinates:\n # in case of infitine loop break\n print(\"Player is stucked inside of a box\")\n break\n\n else:\n print(\"Player came out of the box\")\n print(\"Coordinates of a breaking point\", \"(\", prev_i, prev_j, \")\")", "def valid_ray(self, row, col):\n # if row nor col is at an edge space, returns False\n if row != 0 and row != 9 and col != 0 and col != 9:\n return False\n # ensures no corner spaces have been selected\n if row == 0 or row == 9:\n if col > 8 or col < 1:\n return False\n if col == 0 or col == 9:\n if row > 8 or row < 1:\n return False\n return True", "def ray_at(self, O, t):\n point = self.float_mul(t).plus(O)\n return point", "def iterate_ray(opt_model, ifcx, xy_target, fld, wvl, **kwargs):\n def y_stop_coordinate(y1, *args):\n seq_model, ifcx, pt0, dist, wvl, y_target = args\n pt1 = np.array([0., y1, dist])\n dir0 = pt1 - pt0\n length = norm(dir0)\n dir0 = dir0/length\n if dir0[2] * seq_model.z_dir[0] < 0:\n dir0 = -dir0\n\n try:\n ray, _, _ = rt.trace(seq_model, pt0, dir0, wvl)\n except TraceMissedSurfaceError as ray_miss:\n ray = ray_miss.ray_pkg\n if ray_miss.surf <= ifcx:\n raise ray_miss\n except TraceTIRError as ray_tir:\n ray = ray_tir.ray_pkg\n if ray_tir.surf < ifcx:\n raise ray_tir\n y_ray = ray[ifcx][mc.p][1]\n# print(y1, y_ray)\n return y_ray - y_target\n\n def surface_coordinate(coord, *args):\n seq_model, ifcx, pt0, dist, wvl, target = args\n pt1 = np.array([coord[0], coord[1], dist])\n dir0 = pt1 - pt0\n length = norm(dir0)\n dir0 = dir0/length\n if dir0[2] * seq_model.z_dir[0] < 0:\n dir0 = -dir0\n ray, _, _ = rt.trace(seq_model, pt0, dir0, wvl)\n xy_ray = np.array([ray[ifcx][mc.p][0], ray[ifcx][mc.p][1]])\n# print(coord[0], coord[1], xy_ray[0], xy_ray[1])\n return xy_ray - target\n\n seq_model = opt_model.seq_model\n osp = opt_model.optical_spec\n\n fod = opt_model['analysis_results']['parax_data'].fod\n dist = fod.obj_dist + fod.enp_dist\n\n pt0 = osp.obj_coords(fld)\n if ifcx is not None:\n if pt0[0] == 0.0 and xy_target[0] == 0.0:\n # do 1D iteration if field and target points are zero in x\n y_target = xy_target[1]\n logging.captureWarnings(True)\n try:\n start_y, results 
= newton(y_stop_coordinate, 0.,\n args=(seq_model, ifcx, pt0,\n dist, wvl, y_target),\n disp=False, full_output=True)\n except RuntimeError as rte:\n # if we come here, start_y is a RuntimeResults object\n # print(rte)\n start_y = results.root\n except TraceError:\n start_y = 0.0\n start_coords = np.array([0., start_y])\n else:\n # do 2D iteration. epsfcn is a parameter increment,\n # make proportional to pupil radius\n try:\n start_coords = fsolve(surface_coordinate, np.array([0., 0.]),\n epsfcn=0.0001*fod.enp_radius,\n args=(seq_model, ifcx, pt0, dist,\n wvl, xy_target))\n except TraceError:\n start_coords = np.array([0., 0.])\n else: # floating stop surface - use entrance pupil for aiming\n start_coords = np.array([0., 0.]) + xy_target\n\n return start_coords", "def intersectsRay(self, ray):\n pass", "def shoot(self):\r\n bullet = Bullet(self.rect.centerx, self.rect.top)\r\n ammo.add(bullet)", "def test_polygon_walk(self):\n import numpy\n\n s = space(curvature=-1)\n\n # turning constants in radians\n t1_ref = 6.28318530717958647692528676655867\n t2_ref = t1_ref / 2\n t3_ref = t1_ref / 3\n t4_ref = t1_ref / 4\n\n def make_triangle(f, v):\n f = t1_ref / f\n v = t1_ref / v / 2\n a = (common_math.cos(f) + 1)/common_math.sin(v)**2 - 1\n a = common_math.sqrt(a**2 - 1)\n b = a / common_math.sin(f) * common_math.sin(v)\n a = common_math.asinh(a)\n b = common_math.asinh(b)\n return a, v, b, f, b, v\n\n # use {7, 3} tiling\n\n edge, angle, *_ = make_triangle(7, 3)\n\n def check_walk_eq(t1, t2, invert=False):\n for ref in (\n s.make_origin(2),\n s.make_point((3/5, 4/5), 1/3)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref),\n abs_tol = 1e-12\n ))\n\n t1 = t2 = space_point_transform(s.make_origin(2))\n\n def rotater(angle):\n return space_point_transform(\n numpy.array([\n [1, 0, 0],\n [0, numpy.cos(angle), -numpy.sin(angle)],\n [0, numpy.sin(angle), numpy.cos(angle)]\n ]),\n curvature = -1,\n math = common_math\n )\n\n spin_half = rotater(t2_ref)\n spin_left = rotater(angle)\n spin_right = rotater(-angle)\n forward = space_point_transform(s.make_point((1, 0), edge))\n\n # do some walking in a circle to sanity test\n\n t1 = forward + spin_half + spin_left + spin_half + spin_right + t1\n t2 = forward + t2\n\n check_walk_eq(t1, t2)\n\n # spin around some more\n\n t1 = forward + spin_left * 6 + spin_half + t1\n t2 = forward + spin_half + t2\n\n check_walk_eq(t1, t2)\n\n # walk in a loop\n t1 = (forward + spin_left) * 7 + t1\n\n check_walk_eq(t1, t2)\n\n # walk another way in a loop\n t1 = (spin_right + forward) * 7 + t1\n\n check_walk_eq(t1, t2)\n\n # walk in a more complicated path\n\n t2 = ((spin_right + forward) * 5 + spin_left + forward) * 2 + t2\n\n check_walk_eq(t1, t2)\n\n # a bigger loop\n\n t2 = (spin_left + forward + (spin_right + forward) * 3) * 7 + t2\n\n check_walk_eq(t1, t2)\n\n # a bigger loop, but in the correct direction\n\n t2 = t2 + ((forward + spin_right) * 3 + forward + spin_left) * 7\n\n check_walk_eq(t1, t2)", "def shoot_boolet(self):\n angle = self.angle\n for i in range(3):\n bullet = BulletAlienDos(self.main_game, shooter=self.shooter)\n bullet.vector[0] = 0\n bullet.vector[1] = 1\n bullet.normalized_vector = bullet.vector.normalize()\n bullet.normalized_vector = bullet.normalized_vector.rotate(angle)\n angle -= self.angle\n self.main_game.alien_bullets.add(bullet)", "def trace(self, ray): # type: (Ray) -> Vector\n hit_object = None\n t = numpy.inf\n\n for scene_object in self.scene.shapes:\n t0 = scene_object.intersect(ray)\n if t0 < t:\n t = t0\n 
hit_object = scene_object\n\n # if there were no intersections, then return the background colour\n if t == numpy.inf:\n return self.scene.camera.background\n\n hit_point = ray.origin + ray.direction * t\n normal = hit_object.normal(hit_point)\n luminance = 0.0\n\n # perform shading calculations\n for light in self.scene.lights:\n hit_point_to_light = (light.centre - hit_point).normal\n\n #check whether this light contributes to the shading\n in_shadow = False\n for shadower in self.scene.shapes:\n # we don't want to test against itself\n if shadower == hit_object:\n continue\n shadow_ray = Ray(hit_point + normal * 0.0001, hit_point_to_light)\n if shadower.intersect(shadow_ray) < numpy.inf:\n in_shadow = True\n break\n if in_shadow:\n continue\n\n # super simple lambertian lighting model\n luminance += hit_point_to_light.dot(normal) * light.power\n\n # calculate shaded colour - luminance may be over one if there are multiple light sources\n # normally this would be dealt with by HDR and tone mapping but is just clipped\n # in demo ray tracers\n object_colour = hit_object.material.colour * min(luminance, 1.0)\n\n # calculate reflection colour if material has reflectance\n if hit_object.material.reflectance != 0.0 and ray.depth != self.scene.camera.depth:\n reflected_direction = (ray.direction - normal * 2 * (ray.direction.dot(normal))).normal\n # we need to 'translate' the reflection vector away from the hitpoint otherwise\n # we risk intersecting the original hit point again which causes artifacts in the reflection\n reflected_ray = Ray(hit_point + reflected_direction * 0.0001, reflected_direction, ray.depth + 1)\n reflection_colour = self.trace(reflected_ray)\n\n # interpolate shaded colour and reflected colour based on reflectance\n return Vector(*[lerp(object_colour.data[i], reflection_colour.data[i], hit_object.material.reflectance) for i in range(3)])\n\n return object_colour", "def fireEast(self):\n self.rotate('e')\n gun = Laser(self)\n gun.shoot('e')\n self.agent.actionCompleted()", "def ai_aimed_coord(direction):\n row, col = ai_targetting['last_hit_coord'][0], ai_targetting['last_hit_coord'][1:]\n row_index = row_label.index(row)\n col_index = col_label.index(col)\n in_bounds = False\n if ai_targetting[direction] == True:\n ai_targetting['guess_dir'] = direction\n if direction == 'up':\n row_aimed = row_index - ai_targetting['aim_radius']\n if row_aimed < len(row_label):\n in_bounds = True\n coordinate = row_label[row_aimed] + col_label[col_index]\n else:\n ai_targetting[direction] = False\n elif direction == 'down':\n row_aimed = row_index + ai_targetting['aim_radius']\n if row_aimed < len(row_label):\n in_bounds = True\n coordinate = row_label[row_aimed] + col_label[col_index]\n else:\n ai_targetting[direction] = False\n elif direction == 'right':\n col_aimed = col_index + ai_targetting['aim_radius']\n if col_aimed < len(col_label):\n in_bounds = True\n coordinate = row_label[row_index] + col_label[col_aimed]\n else:\n ai_targetting[direction] = False\n elif direction == 'left':\n col_aimed = col_index - ai_targetting['aim_radius']\n if col_aimed < len(col_label):\n in_bounds = True\n coordinate = row_label[row_index] + col_label[col_aimed]\n else:\n ai_targetting[direction] = False\n if in_bounds:\n return coordinate", "def step(self, action):\n if not hasattr(self, \"robot\"):\n raise RuntimeError(\"reset before step!!!\")\n\n control_miniBox(self.robot.robot, instruction=action, target_velocity=self.target_velocity,\n multiply=self.multiply, 
left_wheel_joint_index=self.left_wheel_joint_index,\n right_wheel_joint_index=self.right_wheel_joint_index, max_force=self.max_force, \n physicsClientId=self._physics_client_id)\n \n p.stepSimulation(physicsClientId=self._physics_client_id) \n self.step_num += 1\n state = self.robot.get_observation(self.target_pos)\n reward = self.__reward_func(state)\n if state[-2] < self.target_radius:\n done = True\n elif self.step_num > self.done_step_num:\n done = True\n else:\n done = False\n info = {\"distance\" : state[-2], \"collision_num\" : self.collision_num}\n\n # under evaluate mode, extra debug items need to be rendered\n if self._evaluate:\n froms, tos, results = rayTest(self.robot.robot, ray_length=self.laser_length, ray_num=self.laser_num)\n for index, result in enumerate(results):\n self.rayDebugLineIds[index] = p.addUserDebugLine(\n lineFromXYZ=froms[index], \n lineToXYZ=tos[index] if result[0] == -1 else result[3], \n lineColorRGB=self.miss_color if result[0] == -1 else self.hit_color, \n lineWidth=self.ray_debug_line_width, \n replaceItemUniqueId=self.rayDebugLineIds[index]\n )\n\n return np.array(state), reward, done, info", "def set_originating_ray(self, ray):\n\n self._originating_ray = ray", "def ship_hit(ai_settings, stats, screen, ship, boss, bullets,boss_bullets):\n if stats.ships_left > 1:\t\n # Decrement ships_left\n stats.ships_left -= 1\n # Empty the list of bullets\n bullets.empty()\n boss_bullets.empty()\n #center the ship.\n ship.center_ship()\n # Pause.\n #sleep(0.5)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)", "def cast_rays(pos):\n global POLYGONS\n dtheta = 0.01\n coll = []\n for vertex in POLYGONS: \n dx = vertex[0] - pos[0]\n dy = vertex[1] - pos[1]\n angle = math.atan2(dy,dx)\n rays = (Ray(pos,angle2pointer(angle-dtheta)) , Ray(pos,angle2pointer(angle)) , Ray(pos,angle2pointer(angle+dtheta)))\n opts = (rays[0].cast(), rays[1].cast(), rays[2].cast())\n if opts[0] != None:\n coll.append(( angle-dtheta, (int(opts[0][0]),int(opts[0][1])) ))\n if opts[1] != None:\n coll.append(( angle, (int(opts[1][0]),int(opts[1][1])) ))\n if opts[2] != None:\n coll.append(( angle+dtheta, (int(opts[2][0]),int(opts[2][1])) ))\n shader_vertices = [x[1] for x in sorted(coll)]\n return shader_vertices", "def shoot(self):\n e = self.energy()\n m = self.mass()\n p = math.sqrt( e**2 - m**2 )\n theta = self.theta()\n pz = p * math.cos(theta)\n pt = p * math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def shootBullet(self,event):\n if (not self.hasBulletLoaded):\n return\n if (self.currentBullets>=self.maxBullets):\n return\n newBullet=Bullet(self.canvasIGetDrawnOn,entityThatCreatedMe=self,xPos=self.xPos,yPos=self.yPos)\n self.hasBulletLoaded=False#the gun chamber is now empty\n self.currentBullets+=1\n newBullet.faceHeading=self.faceHeading\n newBullet.reDraw()\n newBullet.accelerateForwards(movementSpeed=bulletSpeed)#gives the bullet its inital speed.", "def gen_draw_rays_from_film(self):\r\n r = self.aperture_radius[self._elem_count - 1]\r\n step = 0.01\r\n count = ti.cast(r / step,ti.i32)\r\n for j in range(1):\r\n for i in range(count):\r\n y = r - i * step\r\n ori, dir = ti.Vector([0.0, 0.0, 0.0]), ti.Vector([y, 0.0, self.rear_z()])\r\n ok, a, b = self.gen_ray_from_film(ori, dir)\r\n if ok:\r\n self.draw_ray_from_film(ori, dir, 0)\r\n break", "def hitRadius(self, myMissile):\n myMissile.hit(self)", "def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n 
if stats.ships_left > 0:\n #decrement the value of ships_left\n stats.ships_left -= 1\n #update scoreboard\n sb.prep_ships()\n #when hit remove bullets and aliens from screen\n aliens.empty()\n bullets.empty()\n #create a new fleet with ship at centre\n create_fleet(ai_settings, screen,ship, aliens)\n ship.center_ship()\n #pause for a sec to collect defeat\n sleep(1.0)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)", "def shot_intercepted(self, defender, shot):\n\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n le1 = None\n le2 = None\n\n p = None\n q = None\n\n p = LinearEquation.intersection_circle(shot.opponent, shot.angle, defender.pos, defender.radius)\n\n if p == None:\n return False\n\n # If the goal is vertical, solving the intersection won't work\n # it is then done \"by hand\"\n if self.e_pos.x - self.s_pos.x == 0:\n # If the goal and the shot are vertical, return False\n if abs(shot.angle) == math.pi / 2:\n return False\n \n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to\n # undefined behaviors (namely if the goal is vertical)\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the x coordinate of the intersection point of the defender and the shot\n # is between the goal and the opponent x coordinates\n if abs(shot.angle) == math.pi or shot.angle == 0:\n q = Point(self.e_pos.x, o_y)\n return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)\n\n tan_theta = math.tan(shot.angle)\n\n le2 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n q = Point(self.e_pos.x, le2.apply(self.e_pos.x)) \n return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x) \n\n # If the goal is not vertical, it is now possible to define the coefficient\n # of the goal's LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the shot is parallel to the goal (same coefficient) it doesn't\n # matter if it is intercepted (this method should only be used\n # with valid shot in the first place, this is just for completion sake)\n if math.tan(shot.angle) == ratio:\n return False\n\n # LE of the goal\n le1 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the y coordinate of the intersection point of the defender and the shot\n # is between the goal and the opponent\n if abs(shot.angle) == math.pi / 2:\n q = Point(o_x, le1.apply(o_x))\n return self.is_in_interval(min(q.y, o_y), max(q.y, o_y), p.y)\n \n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to\n # undefined behaviors (namely if the goal is vertical)\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the x coordinate of the intersection point of the defender and the shot\n # is between the goal and the opponent y coordinates\n if abs(shot.angle) == math.pi or shot.angle == 0:\n q = Point(le1.reverse(o_y), o_y)\n return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)\n\n tan_theta = math.tan(shot.angle)\n \n # LE of the shot\n le2 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n\n # Find the intersection of the two lines and check if the defender\n # is between this point and the opponent\n q = le1.intersection(le2)\n\n return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)" ]
[ "0.78674567", "0.6780156", "0.67677057", "0.61004895", "0.5816195", "0.5722863", "0.5721122", "0.56699514", "0.566448", "0.5559462", "0.5555234", "0.55531144", "0.55380315", "0.55212104", "0.55208117", "0.5480533", "0.54755425", "0.5391073", "0.53854626", "0.5371789", "0.5366143", "0.5360588", "0.5359978", "0.5329839", "0.53139365", "0.53127253", "0.52707887", "0.5238637", "0.5200795", "0.51774096", "0.5159399", "0.5149512", "0.5120504", "0.51202965", "0.51135844", "0.51038116", "0.5089512", "0.5065696", "0.5061354", "0.5059032", "0.5055577", "0.5049518", "0.5049145", "0.5048786", "0.50244325", "0.5022114", "0.5013953", "0.5006074", "0.5002932", "0.49986595", "0.49853525", "0.49852103", "0.49796963", "0.4954193", "0.49533167", "0.4941201", "0.4939188", "0.49325648", "0.49309778", "0.49108148", "0.49054816", "0.49042964", "0.4885302", "0.48767486", "0.48577473", "0.48500818", "0.4845266", "0.48345232", "0.48344573", "0.48215237", "0.4813334", "0.48090354", "0.47892955", "0.47811127", "0.47799608", "0.4779815", "0.4778212", "0.47744048", "0.4768151", "0.47601604", "0.475766", "0.475625", "0.47524077", "0.4748071", "0.4747908", "0.47223535", "0.47064134", "0.47061914", "0.47055212", "0.47045064", "0.4703747", "0.46972182", "0.46965462", "0.46945143", "0.4692378", "0.46907595", "0.46890947", "0.4687856", "0.4680978", "0.46705574" ]
0.748143
1
guess_atoms a function allowing a player to input guesses as to the locations of atoms on the board
def guess_atom(self, row, column):
    if [row, column] in self._atoms:
        # if an atom was properly guessed remove it from the atom's array
        # and return True, append the guess to the guesses array, and
        # remove it from the available atoms to guess from.
        self._guesses.append([row, column])
        self._atoms.remove([row, column])
        return True
    # otherwise deduct five points and return False
    self.set_score(-5)
    # add the guess to the guesses array
    self._guesses.append([row, column])
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def guess_atom(self, row, col):\n if not self.valid_guess(row, col):\n return False\n # if row,col in guess list, tells players and returns True\n if self._board[row][col] != \" \":\n print(\"You've already guessed that location!\")\n return True\n # if match found, deducts 1 from atoms list\n if (row, col) in self._a_locations:\n self._atoms -= 1\n # if all atoms guessed, changes game state to win and prints it\n if self._atoms == 0:\n self.change_state(\"WON\")\n print(self.get_game_state())\n # adds guess location to list of guesses made and returns True\n self._board[row][col] = \"A\"\n return True\n # deducts 5 from score if no match and checks if you lost\n self._score -= 5\n if self._score <= 0:\n self.change_state(\"LOST\")\n print(self.get_game_state())\n self._board[row][col] = \"X\"\n return False", "def get_atom_guess(self):\r\n return self._player.get_atom_guesses()", "def __init__(self, atoms):\n self._board = Board.Board(atoms)\n self._score = 25\n self._atoms = self._board.get_atoms()\n self._guesses = []", "def guess_atom(self, atom_x, atom_y):\r\n\r\n if self._board.get_board_item(atom_x, atom_y) == 'x':\r\n # if there, add to player's list and remove from board list\r\n marker = self.get_atom_hit()\r\n circle_tuple = self.calculate_entry_exit(atom_y, atom_x)\r\n marker.update_center(circle_tuple)\r\n self._player.add_atom_guess((atom_x, atom_y), marker)\r\n self._stats.remove_atom()\r\n return True\r\n else:\r\n # use the true/false in add_atom_guess return logic to decrement\r\n marker = self.get_atom_miss()\r\n circle_tuple = self.calculate_entry_exit(atom_y, atom_x)\r\n marker.update_center(circle_tuple)\r\n if self._player.add_atom_guess((atom_x, atom_y), marker):\r\n self._stats.dec_player_score(5)\r\n return False\r\n else:\r\n return False", "def ai_get_coord():\n aim_tries = 0\n directions = ['left', 'right', 'up', 'down']\n while True:\n # If there was a hit and not all directions have been exhausted( encounted a miss)\n if ai_targetting['last_hit_coord'] != \"\" and check_aim():\n coordinate = ai_aimed_coord(directions[ai_targetting['dir_count']]) \n if ai_targetting['dir_count'] < 3:\n ai_targetting['dir_count'] += 1\n else:\n ai_targetting['dir_count'] = 0\n ai_targetting['aim_radius'] += 1\n if not coordinate:\n continue\n else:\n reset_aim()\n coordinate = get_rand_coord()\n row, col = coordinate[0], coordinate[1:]\n row_index = row_label.index(row)\n col_index = col_label.index(col) \n # If the coordinate is good, return it to program\n if board[row_index][col_index] != \"*\" and board[row_index][col_index] != \"X\":\n return coordinate\n # If the target is a miss, stop guessing in that direction\n elif board[row_index][col_index] == \"X\":\n ai_targetting[ai_targetting['guess_dir']] = False\n # If the target is already hit and the target will be a boundary next, stop this direction\n elif board[row_index][col_index] == \"*\" and check_edge_case(row_index, col_index):\n ai_targetting[ai_targetting['guess_dir']] = False\n else:\n aim_tries += 1", "def main():\n\n board = [[\".\"] * grid_size for i in range(grid_size)]\n ship_row = random_row(board)\n ship_col = random_col(board) - 1\n ships = 0\n turn = 0\n\n print_board(board)\n while turn < total_turns:\n\n guess_col = get_col()\n guess_row = get_row()\n\n print(\"-\" * 35)\n print(\n f\"You entered: {letter_and_index_conversion(guess_col, grid_size)}{guess_row} \\n\"\n )\n\n if guess_row == ship_row and guess_col == ship_col:\n board[guess_row - 1][guess_col - 1] = \"X\"\n print(\"Congratulations 
Captain! You got a hit!\")\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n ships += 1\n ship_row = random_row(board)\n ship_col = random_col(board)\n if ships == 10:\n print(\"Congratulations Captain! You won!\")\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n else:\n if (\n board[guess_row - 1][guess_col - 1] == \"X\" or\n board[guess_row - 1][guess_col - 1] == \"*\"\n ):\n print(\"You already guessed this one -_-\")\n print(\"-\" * 35)\n else:\n print(\"Your aim is WAY off! \\n\")\n board[guess_row - 1][guess_col - 1] = \"*\"\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n if turn == total_turns:\n print(\"Game Over! You ran out of turns\")\n print(\"-\" * 35)\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n\n print(f\"Turn {turn + 1} of {total_turns}\")\n print(f\"You have {10 - ships} ships left\")", "def guess(self, row, col) -> Tuple[int, Optional[ship.Ship]]:\n my_ship: ship.Ship = self._board_matrix[row][col]\n\n # if my_ship is None the guess is a miss, otherwise its a hit\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.guess, just copy the code over\n\n # --------- END YOUR CODE ----------", "def placeGuess(player, xPos, yPos):\n\t\n\t# Print hit ship message if other player's board\n\t# has a ship at that position\n\tif(player.otherPlayer.board[yPos][xPos] != '~'):\n\t\tplayer.guess[yPos][xPos] = '#'\n\t\tprint(\"You've hit a ship!\\n\")\n\n\t# Print miss message if no ship at that position\n\telse:\n\t\tplayer.guess[yPos][xPos] = 'm'\n\t\tprint(\"You missed!\\n\")\n\n\n\tfor i in range(0, 2):\n\n\t\t# Set enemy to be the other player\n\t\tenemy = player.otherPlayer\n\t\tship = enemy.ships[i]\n\n\t\t# If ship is already sunk, go to next iteration\n\t\tif(ship.sunk == 1):\n\t\t\tcontinue\n\n\t\tbad = 0\n\t\tsX = ship.startX\n\t\tsY = ship.startY\n\t\tori = ship.orientation\n\n\t\t# Check if all of ship in horizontal position is all hit\n\t\tif(ori == 1):\n\t\t\tfor y in range(sY, sY + ship.shipLen):\n\t\t\t\tif(player.guess[y][sX] != enemy.board[y][sX]):\n\t\t\t\t\tbad = 1\n\t\t\t\t\tbreak\n\n\t\t# Check if all of ship in vertical position is all hit\n\t\telse:\n\t\t\tfor x in range(sX, sX + ship.shipLen):\n\t\t\t\tif(player.guess[sY][x] != enemy.board[sY][x]):\n\t\t\t\t\tbad = 1\n\t\t\t\t\tbreak\n\n\t\t# If entire ship is hit, sink ship, print ship sunk message\n\t\tif(bad == 0):\n\t\t\tship.sunk = 1\n\t\t\tprint(\"You sank a \" + ship.name + \"\\n\")\n\t\t\tbreak", "def guess(mqtt_client, number_to_guess_entry):\n # TODO: 5. Uncomment the line of code below to make guesses with EV3.\n mqtt_client.send_message(\"guess\", [int(number_to_guess_entry.get())])\n number_to_guess_entry.delete(0, 'end')\n # Note: You can play the game with only TO DO 5 complete, but it will be easier to solve if you do TO DO 6 as well.", "def main():\n word = random_word()\n old_ans = dashed(word)\n print('You have ' + str(N_TURNS) + ' guesses left.')\n guess(word, old_ans)", "def get_atoms(choice_qe: bool):\n\tpwd = os.getcwd()\n\tif \"qe\" in pwd:\n\t\tatoms = re.search(r\"qe/(.*?)/\",pwd).group(1)\n\t\tprint(f\"Current 'atoms': {atoms}; is it right ([y]/n)? 
\", end=\"\")\n\t\twhile True:\n\t\t\tchoice = input()\n\t\t\tif choice == \"y\" or choice == \"\": return atoms\n\t\t\telif choice == \"n\":\n\t\t\t\tatoms = input(\"Please input 'atoms': \"); return atoms\n\t\t\telse: print(\"Plz enter 'y' or 'n': \", end=\"\")\n\telif choice_qe:\n\t\tprint(f\"You are not in the 'qe'-dir but switch on the qe option. Current directory:{pwd}\")\n\t\tatoms = input(\"Please input 'atoms': \"); return atoms\n\telse:\n\t\tprint(\"This program only works under 'qe' directory; you're not in here.\"); exit(1)", "def get_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words):\n\n\tprint \"\\n The word to guess is: \", mask\t\n\tprint \"\\n # of attempts: \", attempts\n\tprint \"\\n Insert a letter or a number \\n\"\n\tthe_guess = raw_input()\n\tthe_guess = the_guess.lower()\n\t# Check if the input is a valid character\n\tvalidity = check_validity(the_guess, valid_characters, user_guesses)\n\tif (validity is True):\n\t\t# CHeck if the user has guessed the letter\n\t\tif (check_if_guessed(the_guess, word_to_guess) >= 0):\n\t\t\tprint \"\\n Great! your choosed the correct letter!\"\n\t\t\tuser_guesses += the_guess\n\t\t\tmask = calculate_mask(user_guesses, word_to_guess)\n\t\t\tyou_won = check_if_won(user_guesses, word_to_guess, secret_words)\n\t\t\tif you_won is True:\n\t\t\t\t# If the user has won it stop the game\n\t\t\t\treturn\n\t\telse:\n\t\t\tattempts = attempts + 1\n\t\t\tprint \"\\n Sorry! the letter is not present in the word! you have now %d guess left\" % (6 - attempts)\n\t\t\tyou_lost = check_if_lost(attempts, secret_words)\n\t\t\tif you_lost is True:\n\t\t\t\t# If he user has lost it stop the game\n\t\t\t\treturn\n\telse:\n\t\tprint \"\\n The input is not valid! Insert a valid input\"\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn", "def getGameState(self):\n ### Student code goes here\n\n ask_tile_11 = parse_input(\"fact: (located ?X pos1 pos1)\")\n ask_tile_12 = parse_input(\"fact: (located ?X pos2 pos1)\")\n ask_tile_13 = parse_input(\"fact: (located ?X pos3 pos1)\")\n ask_tile_21 = parse_input(\"fact: (located ?X pos1 pos2)\")\n ask_tile_22 = parse_input(\"fact: (located ?X pos2 pos2)\")\n ask_tile_23 = parse_input(\"fact: (located ?X pos3 pos2)\")\n ask_tile_31 = parse_input(\"fact: (located ?X pos1 pos3)\")\n ask_tile_32 = parse_input(\"fact: (located ?X pos2 pos3)\")\n ask_tile_33 = parse_input(\"fact: (located ?X pos3 pos3)\")\n\n bindings_11 = self.kb.kb_ask(ask_tile_11)\n bindings_12 = self.kb.kb_ask(ask_tile_12)\n bindings_13 = self.kb.kb_ask(ask_tile_13)\n bindings_21 = self.kb.kb_ask(ask_tile_21)\n bindings_22 = self.kb.kb_ask(ask_tile_22)\n bindings_23 = self.kb.kb_ask(ask_tile_23)\n bindings_31 = self.kb.kb_ask(ask_tile_31)\n bindings_32 = self.kb.kb_ask(ask_tile_32)\n bindings_33 = self.kb.kb_ask(ask_tile_33)\n\n row1_list = []\n row2_list = []\n row3_list = []\n\n row1_list.append(bindings_11.list_of_bindings[0][0].bindings[0].constant.element)\n row1_list.append(bindings_12.list_of_bindings[0][0].bindings[0].constant.element)\n row1_list.append(bindings_13.list_of_bindings[0][0].bindings[0].constant.element)\n\n row2_list.append(bindings_21.list_of_bindings[0][0].bindings[0].constant.element)\n row2_list.append(bindings_22.list_of_bindings[0][0].bindings[0].constant.element)\n row2_list.append(bindings_23.list_of_bindings[0][0].bindings[0].constant.element)\n\n row3_list.append(bindings_31.list_of_bindings[0][0].bindings[0].constant.element)\n 
row3_list.append(bindings_32.list_of_bindings[0][0].bindings[0].constant.element)\n row3_list.append(bindings_33.list_of_bindings[0][0].bindings[0].constant.element)\n\n counter = 0\n for tile in row1_list:\n if tile == \"empty\":\n row1_list[counter] = -1\n else:\n row1_list[counter] = int(tile[4:])\n counter += 1\n\n counter = 0\n for tile in row2_list:\n if tile == \"empty\":\n row2_list[counter] = -1\n else:\n row2_list[counter] = int(tile[4:])\n counter += 1\n\n counter = 0\n for tile in row3_list:\n if tile == \"empty\":\n row3_list[counter] = -1\n else:\n row3_list[counter] = int(tile[4:])\n counter += 1\n\n gamestate = (tuple(row1_list), tuple(row2_list), tuple(row3_list))\n return gamestate", "def react(molecules):\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n return random.choice(alphabet)", "def play_best_guess(self, game):\n\n\n # create a list of cells\n cells = [game.board[i][j]\n for i in xrange(game.rows)\n for j in xrange(game.cols)]\n\n first_cell = cells[0]\n game.reveal_cell(first_cell.row, first_cell.col)\n\n # draw updated board and pause for a second\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n\n\n total_flagged = 0\n while not game.lost_game and not game.won_game:\n\n # remember if we've made a move in the while loop\n # so we know whether to make a random move later on\n made_move = False\n\n # look through all revealed cells for any with a number of neighboring mines.\n # if the cell has the same number of unrevealed neighbors as the cell's\n # number of neighboring mines, all the unrevealed neighbors must be mines.\n revealed_numbered_cells = [c for c in cells if c.revealed and (not c.flagged) and (c.neighbors > 0)]\n while revealed_numbered_cells:\n cell = revealed_numbered_cells.pop()\n # cell may have been marked flagged after revealed_numbered_cells was compiled\n if not cell.flagged:\n neighbor_cells = ms.Minesweeper.get_neighbors(cell.row, cell.col, game.board)\n flagged_neighbors = [n for n in neighbor_cells if n.flagged]\n number_remaining_mines = cell.neighbors - len(flagged_neighbors)\n unknown_neighbors = [n for n in neighbor_cells if not n.flagged and not n.revealed]\n if number_remaining_mines > 0 and len(unknown_neighbors) == number_remaining_mines:\n # flag every neighbor\n for c in unknown_neighbors:\n if total_flagged < game.mines:\n total_flagged += 1\n game.flag_cell(c.row, c.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n made_move = True\n\n # we may have won with the flag above so test whether we're still playing\n # before further calculations\n if not game.lost_game and not game.won_game:\n # loop through all unrevealed, unflagged cells and see if we know it's safe to reveal\n for c in cells:\n if not c.revealed and not c.flagged and self.is_cell_safe(c, game.board):\n game.reveal_cell(c.row, c.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n made_move = True\n\n # assume we've made our best guesses and now have to guess randomly\n # this will prevent us from looping forever if no obvious moves are available\n if not made_move:\n unrevealed = [c for c in cells if not c.revealed and not c.flagged]\n if len(unrevealed) > 0:\n cell = random.choice(unrevealed)\n game.reveal_cell(cell.row, cell.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(3)", "def human_go(self, board):\r\n coord_pattern = re.compile(\r\n \"[0-{}],[0-{}]\".format(board.shape[0], 
board.shape[1])\r\n )\r\n print(\"Enter Coordinates of your go then press enter.\")\r\n input_str = input(\"(space seperated, 0-2 with origin in top left)\\n\")\r\n\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n y, x = [int(coord) for coord in input_str.split(\",\")]\r\n if board[x][y] != 0:\r\n print(\"That square is already taken, please try again\")\r\n self.human_go()\r\n else:\r\n board[x][y] = -1\r\n return board", "def __init__(self, atom_list):\n\n self._board = [\n [\" \", u\"\\u2191\", u\"\\u2191\", u\"\\u2191\", u\"\\u2191\", u\"\\u2191\", u\"\\u2191\", u\"\\u2191\", u\"\\u2191\", \" \"],\n [u\"\\u2192\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", u\"\\u2190\"],\n [u\"\\u2192\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", u\"\\u2190\"],\n [u\"\\u2192\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", u\"\\u2190\"],\n [u\"\\u2192\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", u\"\\u2190\"],\n [u\"\\u2192\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", u\"\\u2190\"],\n [u\"\\u2192\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", u\"\\u2190\"],\n [u\"\\u2192\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", u\"\\u2190\"],\n [u\"\\u2192\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", u\"\\u2190\"],\n [\" \", u\"\\u2193\", u\"\\u2193\", u\"\\u2193\", u\"\\u2193\", u\"\\u2193\", u\"\\u2193\", u\"\\u2193\", u\"\\u2193\", \" \"]]\n self._a_locations = atom_list\n self._score = 25\n self._guesses = []\n self._portals = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\",\n \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\",\n \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\"]\n self._atoms = len(atom_list)\n self._game_state = \"PLAYING\"", "def get_inputs(self):\n self.console.write(self.words.show_lines() + \"\\n\")\n self.console.write(self.jumper.jumper_output())\n guess = input(\"Guess a letter [a-z]: \").lower()\n self.good_guess = self.words.get_lines(guess)", "def start_game(self, num_atom):\r\n\r\n # Reset the game statistics\r\n self._stats.set_status(\"playing\")\r\n self.update_screen()\r\n if type(num_atom) == str:\r\n atom_list = self.manual_input()\r\n self.update_board_atoms(atom_list)\r\n else:\r\n atom_list = []\r\n while len(atom_list) < num_atom:\r\n atom_tup = randint(1, 8), randint(1, 8)\r\n if atom_tup not in atom_list:\r\n atom_list.append(atom_tup)\r\n self.update_board_atoms(atom_list)", "def get_qe_atoms(pwd, qe_switch: bool):\n\tif not qe_switch:\n\t\treturn False\n\tatoms_list = []\n\tfor job in joblist:\n\t\tfin = open(job, \"r\"); file = fin.read(); fin.close()\n\t\tatoms = re.search(r\"-np\\s+\\$NPROCS\\s+.*\\s+<(.*?)\\..*>\\s+(.*?)\\..*\", file).group(1)\n\t\tatoms_list.append(atoms) if atoms not in atoms_list else 0\n\tif len(atoms_list) == 1 and atoms_list[0] in pwd:\n\t\treturn False\n\telse:\n\t\tprint(f\"Atom_name in jobs is not coincident with it in workdir; current workdir: {pwd}, 'atoms' in jobs: {atoms_list}\")\n\t\tatoms = input(\"Please enter the 'atoms' of your batch: \")\n\treturn atoms", "def identify_bonds(chosen_atom, atom_list):\n list_of_hydrogens = ['H15', 'H14', 'H13', 'H12', 'H11', 'H10', 'H9', 'H8', 'H7', 'H6', 'H5', 'H4', 'H3', 'H2', 'H1'] \n if ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name != \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - 
atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 2)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n elif ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name == \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.8)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n else:\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 1.6) and (abs(chosen_atom.y - atom.y) <= 1.6) and (abs(chosen_atom.z - atom.z) <= 1.6))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.6)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n for elements in nearby_atoms:\n if (check_if_no_bond(chosen_atom, elements, bond_list, bond_list_3) == True):\n nearby_atoms.remove(elements)\n if (len(nearby_atoms) == len(identified_bonds)):\n return identified_bonds\n else:\n return []", "def _input_coords(game,player):\r\n print(Player.get_name(player)+'('+Player.get_spec(player)+')'+\" it's your turn\")\r\n coords = input(\"coords of cell? \")\r\n coords = coords.split(',')\r\n try :\r\n x = int(coords[0])\r\n y = int(coords[1])\r\n if game[x][y] == '' : \r\n return (x,y)\r\n except :\r\n return _input_coords(game,player)\r\n print('illegal play, choose an empty cell')\r\n return _input_coords(game,player)", "def update_board_atoms(self, list_atoms):\r\n self._board = Board(list_atoms, self._screen)\r\n self._stats.update_num_atoms(len(list_atoms))", "def test_boxes_to_atoms(self, postfix_directory):\n protein_file = os.path.join(postfix_directory, \"PfATP4.pdb\")\n ligand_file = os.path.join(postfix_directory, \"SJ733.pdb\")\n coords = rdkit_util.load_molecule(protein_file)[0]\n boxes = dc.dock.binding_pocket.get_all_boxes(coords)\n\n mapping = dc.dock.binding_pocket.boxes_to_atoms(coords, boxes)\n assert isinstance(mapping, dict)\n for box, box_atoms in mapping.items():\n (x_min, x_max), (y_min, y_max), (z_min, z_max) = box\n for atom_ind in box_atoms:\n atom = coords[atom_ind]\n assert x_min <= atom[0] and atom[0] <= x_max\n assert y_min <= atom[1] and atom[1] <= y_max\n assert z_min <= atom[2] and atom[2] <= z_max", "def tinyMazeSearch(problem):\n print \"ahahaha\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def new_game(secret_words):\n\n\tattempts=0\n\tword_index = random.randint(0,5)\n\tword_to_guess = secret_words[word_index]\n\tglobal mask\n\tmask = \" _ \" * len(secret_words[word_index])\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn", "def atoms(formula):\n pass\n # ======== YOUR CODE HERE ========", "def test_easy_bot_map1(self):\n game = self.bot_test_map1(Difficulty.easy)\n self.assertEqual(game.first_player.ask_for_move(), (0, 7))", "def main():\n word = random_word()\n 
attempt_left = N_TURNS\n ans = intro(word, attempt_left)\n while attempt_left != 0:\n hangman_figure(attempt_left)\n ans, attempt_left = hangman(word, ans, attempt_left)\n if ans == word: # if players had guess the word correctly\n print('You are correct!')\n print('You win!!')\n print('The word was: ' + word)\n break\n else:\n print('The word looks like: ' + ans)\n if attempt_left == 0: # players failed to guess the word correctly\n hangman_figure(attempt_left)\n print('You are completely hung : (')", "def get_input(self, guess):\r\n print\r\n print \"The player guessed = \", guess\r\n result = self.process_player_input(guess)\r\n print result\r\n if ((self.remaining_guesses == 0) or ( result == self.correctguess_message)):\r\n # Start a new game, with same range\r\n self.init(self.num_range)\r\n return result", "def interact():\r\n\tboard = None\r\n\tline = raw_input(\"Enter Command (h for help): \")\r\n\tline = line.replace(\" \", \"\")\r\n\twhile line and line[0] != \"q\":\r\n\t\tcommand = line[0]\r\n\t\tif command == 'n': \r\n\t\t\tcoordinates = ast.literal_eval(line[1:])\r\n\t\t\trows = coordinates[0]\r\n\t\t\tcols = coordinates[1]\r\n\t\t\tboard = createBoard(int(rows),int(cols))\r\n\t\telif command == 'i':\r\n\t\t\tliveCells = ast.literal_eval(line[1:])\r\n\t\t\tfor cells in liveCells:\r\n\t\t\t\tboard[cells[0]-1][cells[1]-1] = 1\r\n\t\telif command == 'p':\r\n\t\t\tprintBoard(board)\r\n\t\telif command == 'r':\r\n\t\t\tnumOfGens = ast.literal_eval(line[1:])\r\n\t\t\tfor n in range(numOfGens):\r\n\t\t\t\tboard = next_life_generation(board)\r\n\t\t\t\tprintBoard2(board)\r\n\t\telif command == 's':\r\n\t\t\tprintBoard2(board)\r\n\t\telif command == 'd':\r\n\t\t\tprintBoard2(board)\r\n\t\telif command == 'h':\r\n\t\t\tprint(\"\"\" \r\nCommands:\r\nn\t[height,width] (Create a height * width board)\r\ni\t(initialize life)\r\np\t(print the board)\r\ns\t(display the board)\r\nr\t(advance n generations, displaying each after)\r\nh\t(display this help reminder)\r\n\r\nq\t(quit)\"\"\")\r\n\t\tline = raw_input(\"Enter Command (h for help): \")\r\n\t\tline = line.replace(\" \", \"\")\r\n\tprint \"Life is over\\n\"", "def random_displace_atoms(self, atoms, displacement: float = 0.05):\n np.random.seed(42)\n coords = np.array([atoms.get_position(i) for i in range(len(atoms))])\n coords += displacement * (np.random.rand(*coords.shape) - 0.5) * 2.0 / np.sqrt(3.0)\n atoms.positions = coords", "def get_col():\n\n while True:\n try:\n guess_letter = str(input(\"Guess a column: \\n\")).upper()\n guess = letter_and_index_conversion(guess_letter, grid_size)\n if guess in range(1, grid_size + 1):\n return guess\n else:\n print(\"Bruh! That's not even in the ocean o_O\")\n except ValueError:\n print(\n f\"\\nPlease enter a letter for the column between {alphabet_list[0]} and {alphabet_list[grid_size - 1]}\"\n )", "def guess_potentialisation(self, sysargs):\n\n print(\"Guessing potentialisation...\")\n print(\"Copying reference basis...\")\n shutil.copyfile(self.reference_guess_basis_path, os.path.join(os.getcwd(), 'basis'))\n\n sp2_replacement_list = []\n sp2_deletion_list = []\n sp2_carbon_list = []\n sp3_replacement_list = []\n sp3_deletion_list = []\n sp3_carbon_list =[]\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n\n # Sort through carbons to decide what needs potentialising. 
Find atoms bonded to each carbon\n for atom in carbon_atoms:\n distanced_atoms = self.order_atoms_by_distance_from(atom['#'])\n nearest_4_distances = [self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) for distanced_atom in\n distanced_atoms[1:5]]\n bonded_distances = [less_than_distance for less_than_distance in nearest_4_distances if\n less_than_distance < self.bond_deciding_distance]\n\n # if 3 bonded atoms, may be sp2, check if they're hydrogens\n if len(bonded_distances) == 3:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n sp2_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp2_replacement_list.append(str(atom['#']))\n sp2_carbon_list.append(atom)\n\n # if 4 bonded atoms, may be sp3, check if they're hydrogens\n elif len(bonded_distances) == 4:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n if len(hydrogens_bonded_to_this_atom) == 3:\n sp3_replacement_list.extend([str(hydrogen['#']) for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_carbon_list.append(atom)\n\n log_file = open('pseudification.log', 'w+')\n log_file.writelines(\n 'sp2 carbon indices: %s \\nsp3 carbon indices: %s \\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_carbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_carbon_list)\n ))\n\n sp2_coord_command = 'mn sp2 %s' % (','.join(sp2_replacement_list))\n print(\"sp2 command: %s\" % sp2_coord_command)\n sp3_coord_command = 'mn sp3 %s' % (','.join(sp3_replacement_list))\n print(\"sp3 command: %s\" % sp3_coord_command)\n\n if 'nosp3' not in sysargs:\n self.pseudopotentialise_ethane_like_molecule(sp3_coord_command.split(), execute_deletion=False)\n self.pseudopotentialise_molecule(sp2_coord_command.split(), execute_deletion=False)\n\n self.delete_specified_atoms(sp2_deletion_list + sp3_deletion_list)\n\n print(\"Identifying 2-electron sp2 carbons...\")\n # Now need to work out where the 2e sp2 carbons are\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n if len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n print(\"Re-discovered %s sp2 carbons.\" % str(len(sp2_pseudocarbon_list)))\n\n # Now check for ncore=4 sp2 pseudocarbons\n pseudopotential_hashes_to_delete = []\n for atom in sp2_pseudocarbon_list:\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n carbons_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_carbon_list[1:5] if\n self.measure_atom_atom_dist(atom['#'],\n distanced_atom[\n '#']) < self.bond_deciding_distance]\n print(\"Carbons bonded to atom %s: %s\" % (str(atom['#']),\n str([carbon['#'] for carbon in carbons_bonded_to_this_atom])))\n\n for carbon_bonded_to_this_atom in carbons_bonded_to_this_atom:\n if carbon_bonded_to_this_atom not in sp2_pseudocarbon_list:\n def distance_from(list_atom):\n return 
self.measure_atom_atom_dist(carbon_bonded_to_this_atom['#'], list_atom['#'])\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # find pseudos closest to the other carbon\n pseudos_distanced_from_sp2_2e = sorted(carbon_pseudos, key=distance_from)\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[0]['#'])\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[1]['#'])\n\n self.delete_specified_atoms(pseudopotential_hashes_to_delete)\n\n # Read final coordinates\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n sp2_2e_pseudocarbon_list = []\n sp2_2e_pseudohydrogen_list = []\n sp3_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n\n # if 3 atoms within pseudo-distance this is an sp3 pseudo-carbon\n if len(carbon_pseudos) == 3:\n sp3_pseudocarbon_list.append(atom)\n\n # if 4 atoms within pseudo-distance this is an sp2 2e pseudo-carbon\n elif len(carbon_pseudos) == 4:\n sp2_2e_pseudocarbon_list.append(atom)\n sp2_2e_pseudohydrogen_list.extend(carbon_pseudos)\n\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n elif len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n\n\n log_file.writelines(\n 'sp2 pseudocarbon indices: %s \\nsp3 pseudocarbon indices: %s\\nsp2 2e pseudocarbon indices: %s\\nsp2 2e pseudohydrogen indices: %s\\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudohydrogen_list)\n ))\n\n # Need to supply potentials to atoms\n define_cmds_path = 'define_add_pseudos'\n with open(os.path.join(define_cmds_path), 'w') as var_file:\n var_file.writelines(define_cmds % (\n # sp2 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'ecp', self.sp2_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'ecp', self.sp2_hydrogen_ecp),\n # sp3 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'ecp', self.sp3_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'ecp', self.sp3_hydrogen_ecp),\n # sp2 2e potentials\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define([hydrogen['#'] for hydrogen in sp2_2e_pseudohydrogen_list], 'ecp', self.sp2_2e_hydrogen_ecp),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'ecp', self.sp2_2e_carbon_ecp),\n ))\n\n self.run_define('define_add_pseudos')", "def play_hangman(self) -> None: \n tries=6\n current_word=self.get_word()\n guessed_word = False\n word_hidden_states = [current_word[indx] for indx in sample(range(0, len(current_word)-1), randint(1, len(current_word)-2))]\n 
word_completion_state = [letter if letter not in word_hidden_states else \"_\" for letter in current_word]\n\n while tries > 0 and not guessed_word: \n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n self.display_state(tries,word_completion_state)\n guessed_char=str(input(\"Guess a Character : \")).upper()\n\n if guessed_char in word_hidden_states :\n print(\"\\nCorrect Guess !!!!!! Updating..........\")\n for indx,_ in enumerate(word_completion_state) : \n if guessed_char == current_word[indx]:\n word_completion_state[indx]=guessed_char\n \n word_hidden_states = [char for char in word_hidden_states if char != guessed_char]\n guessed_word = False if \"_\" in word_completion_state else True\n sleep(5)\n else :\n print(\"\\nIncorrect Guess!!! Updating!!!!!!\")\n sleep(5)\n tries=tries-1\n \n if tries == 0 and not guessed_word:\n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n print(f\"{'-' * 20}HANGMAN{ '-' * 20}\\n\\n\")\n print(self.hangman_states[-1] + \"\\n\")\n print(f\"No Tries Remaining , YOU LOST !!!!!\")\n print(f\"CORRECT WORD was ------> {current_word}\")\n print(f\"GAME OVER\")\n \n if guessed_word:\n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n print(f\"{'-' * 20}HANGMAN{ '-' * 20}\\n\\n\")\n print(self.hangman_states[-tries] + \"\\n\")\n print(f\"YOU GUESSED THE WORD CORRECTLY !!!\")\n print(f\"WORD was ------> {current_word}\")\n print(f\"Congratulations You win\")", "def checkIfHitOrMiss(guess, opponents_board):\r\n print \"\\n\\n\\n\"\r\n opponents_slot = opponents_board[guess]\r\n if opponents_slot == \" \":\r\n print \"Miss!\"\r\n opponents_board[guess] = \"M\"\r\n return \"miss\"\r\n if opponents_slot == \"M\" or opponents_slot == \"H\":\r\n print \"You've already guess this! 
Try again.\"\r\n print \"\\n\"\r\n return \"guess again\"\r\n if opponents_slot == \"S\":\r\n print \"You've hit the ship!\"\r\n opponents_board[guess] = \"H\"\r\n return \"hit\"", "def matchPos(self, allAtoms, pbcCount, forbiddenPosList, forbiddenAtomsList):\n length = len(allAtoms) / pbcCount\n for atomIndex in range(len(allAtoms)):\n atom = allAtoms[atomIndex]\n if atomIndex in forbiddenAtomsList:\n self._places[atomIndex] = (-1)\n self._distances[atomIndex] = (-1000)\n else:\n if self._places[atomIndex] in forbiddenPosList or self._places[atomIndex] == -1:\n self._places[atomIndex] = (self.findPlace(atom, self.lattice\n , forbiddenPosList))\n if not self._places[atomIndex] is None:\n self._distances[atomIndex] = distance(\n self.lattice.positions[self._places[atomIndex]].x0\n , [atom.x, atom.y])\n \n \n minDists = [min([self._distances[i + n * length] for n in range(pbcCount)]) for i in range(length)]\n posMinDists = [posmin([self._distances[i + n * length] for n in range(pbcCount)]) for i in range(length)]\n indexOfAtom = posmax(minDists)\n indexOfAtom += posMinDists[indexOfAtom] * length \n return (indexOfAtom, self._places[indexOfAtom])", "def main():\n grid_size = ''\n pokemons_num = ''\n\n #input grid_size\n while True:\n grid_size = input('Please input the size of the grid: ')\n if grid_size.isdigit() == True and 1 <= int(grid_size) <= 26:\n break\n #input pokemons_num\n while pokemons_num.isdigit() == False:\n pokemons_num = input('Please input the number of pokemons: ')\n grid_size = int(grid_size)\n pokemons_num = int(pokemons_num)\n\n #initalize game\n pokemon_locations = generate_pokemons(grid_size, pokemons_num)\n #print(pokemon_locations)\n game = UNEXPOSED*(grid_size**2)\n \n display_game(game,grid_size)\n\n #loop until win or lose\n while True:\n print('')\n user_input = input('Please input action: ')\n #no input\n if len(user_input) == 0:\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n continue\n #help\n if user_input == 'h':\n print(HELP_TEXT)\n display_game(game,grid_size)\n continue\n #quit\n if user_input == 'q':\n input_tmp = input('You sure about that buddy? 
(y/n): ')\n if input_tmp == 'y':\n print('Catch you on the flip side.')\n break\n elif input_tmp == 'n':\n print(\"Let's keep going.\")\n display_game(game,grid_size)\n continue\n else:\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n continue\n #restart\n if user_input == ':)':\n game = UNEXPOSED*(grid_size**2)\n pokemon_locations = generate_pokemons(grid_size, pokemons_num)\n print(\"It's rewind time.\")\n display_game(game,grid_size)\n continue\n #flag\n if user_input[0] == 'f':\n user_input = user_input[2:]\n position = parse_position(user_input,grid_size)\n if position != None:\n index_tmp = position_to_index(position,grid_size)\n game = flag_cell(game, index_tmp)\n else:\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n else:\n position = parse_position(user_input,grid_size)\n if position != None:\n #valid action\n index_tmp = position_to_index(position,grid_size)\n #if position flagged\n if game[index_tmp] == FLAG:\n display_game(game,grid_size)\n continue\n #lose\n if position_to_index(position,grid_size) in pokemon_locations:\n for loc in pokemon_locations:\n game = replace_character_at_index(game,loc,POKEMON)\n display_game(game,grid_size)\n print('You have scared away all the pokemons.')\n break\n #next step\n positions_to_show = big_fun_search(game, grid_size, pokemon_locations, position_to_index(position,grid_size))\n game = replace_character_at_index(game, index_tmp, str(number_at_cell(game, pokemon_locations, grid_size, index_tmp)))\n for posi in positions_to_show:\n #if flagged\n if game[posi] == FLAG:\n continue\n game = replace_character_at_index(game, posi, str(number_at_cell(game, pokemon_locations, grid_size, posi)))\n else:#not valid action\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n #check win\n if check_win(game, pokemon_locations) == True:\n print('You win.')\n break", "def fit_atom_pos(single_parm):\n atom_ind = single_parm[0]\n parm_dict = single_parm[1]\n fitting_parms = single_parm[2]\n\n all_atom_guesses = parm_dict['atom_pos_guess']\n closest_neighbors_mat = parm_dict['nearest_neighbors']\n cropped_clean_image = parm_dict['cropped_cleaned_image']\n\n fit_region_size = fitting_parms['fit_region_size']\n gauss_width_guess = fitting_parms['gauss_width_guess']\n num_nearest_neighbors = fitting_parms['num_nearest_neighbors']\n min_amplitude = fitting_parms['min_amplitude']\n max_amplitude = fitting_parms['max_amplitude']\n position_range = fitting_parms['position_range']\n max_function_evals = fitting_parms['max_function_evals']\n min_gauss_width_ratio = fitting_parms['min_gauss_width_ratio']\n max_gauss_width_ratio = fitting_parms['max_gauss_width_ratio']\n verbose = False\n if 'verbose' in parm_dict:\n verbose = parm_dict['verbose']\n\n x_center_atom = all_atom_guesses[atom_ind, 0]\n y_center_atom = all_atom_guesses[atom_ind, 1]\n x_neighbor_atoms = all_atom_guesses[closest_neighbors_mat[atom_ind], 0]\n y_neighbor_atoms = all_atom_guesses[closest_neighbors_mat[atom_ind], 1]\n x_range = slice(max(int(np.round(x_center_atom - fit_region_size)), 0),\n min(int(np.round(x_center_atom + fit_region_size)),\n cropped_clean_image.shape[0]))\n y_range = slice(max(int(np.round(y_center_atom - fit_region_size)), 0),\n min(int(np.round(y_center_atom + fit_region_size)),\n cropped_clean_image.shape[1]))\n\n will_fail = False\n # Stephen says that it does not matter if guesses are outside but the fit does not work\n # well when guesses are outside the window\n x_outside = 
np.hstack((np.where(x_neighbor_atoms < x_range.start)[0],\n np.where(x_neighbor_atoms > x_range.stop)[0]))\n y_outside = np.hstack((np.where(y_neighbor_atoms < y_range.start)[0],\n np.where(y_neighbor_atoms > y_range.stop)[0]))\n guesses_outside = np.unique(np.hstack((x_outside, y_outside)))\n if guesses_outside.size >= 0.5 * num_nearest_neighbors:\n if verbose:\n warn('Atom {}: Too few ({} of {}) neighbors within window to fit'.format(atom_ind, num_nearest_neighbors -\n guesses_outside.size,\n num_nearest_neighbors))\n will_fail = True\n else:\n guesses_inside = np.invert(np.in1d(np.arange(num_nearest_neighbors), guesses_outside))\n x_neighbor_atoms = x_neighbor_atoms[guesses_inside]\n y_neighbor_atoms = y_neighbor_atoms[guesses_inside]\n num_nearest_neighbors = x_neighbor_atoms.size\n\n fit_region = cropped_clean_image[x_range, y_range]\n\n # define x and y fitting range\n s1, s2 = np.meshgrid(range(x_range.start, x_range.stop),\n range(y_range.start, y_range.stop))\n s_mat = np.dstack((s1.T, s2.T))\n\n # initial guess values\n x_guess = np.hstack((x_center_atom, x_neighbor_atoms))\n y_guess = np.hstack((y_center_atom, y_neighbor_atoms))\n a_guess = cropped_clean_image[np.uint32(x_guess), np.uint32(y_guess)]\n sigma_guess = gauss_width_guess * np.ones(num_nearest_neighbors + 1)\n\n coef_guess_mat = np.transpose(np.vstack((a_guess, x_guess,\n y_guess, sigma_guess)))\n # Set up upper and lower bounds:\n lb_mat = [min_amplitude * np.ones(num_nearest_neighbors + 1),\n coef_guess_mat[:, 1] - position_range,\n coef_guess_mat[:, 2] - position_range,\n min_gauss_width_ratio * gauss_width_guess * np.ones(num_nearest_neighbors + 1)]\n\n ub_mat = [max_amplitude * np.ones(num_nearest_neighbors + 1),\n coef_guess_mat[:, 1] + position_range,\n coef_guess_mat[:, 2] + position_range,\n max_gauss_width_ratio * gauss_width_guess * np.ones(num_nearest_neighbors + 1)]\n lb_mat = np.transpose(lb_mat)\n ub_mat = np.transpose(ub_mat)\n\n if will_fail:\n coef_fit_mat = coef_guess_mat\n plsq = None\n else:\n # Now refine the positions!\n\n def gauss_2d_residuals(parms_vec, orig_data_mat, x_data_mat):\n \"\"\"\n Calculates the residual\n Parameters\n ----------\n parms_vec : 1D numpy array\n Raveled version of the parameters matrix\n orig_data_mat : 2D numpy array\n Section of the image being fitted\n x_data_mat : 3D numpy array\n\n Returns\n -------\n err_vec : 1D numpy array\n Difference between the original data and the matrix obtained by evaluating parms_vec with x_data_mat\n \"\"\"\n # Only need to reshape the parms from 1D to 2D\n parms_mat = np.reshape(parms_vec, (-1, 4))\n\n err = orig_data_mat - multi_gauss_surface_fit(parms_mat, x_data_mat)\n return err.ravel()\n\n plsq = least_squares(gauss_2d_residuals,\n coef_guess_mat.ravel(),\n args=(fit_region, s_mat),\n bounds=(lb_mat.ravel(), ub_mat.ravel()),\n jac='2-point', max_nfev=max_function_evals)\n coef_fit_mat = np.reshape(plsq.x, (-1, 4))\n\n if verbose:\n return coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq\n else:\n return coef_guess_mat, coef_fit_mat", "def get_coords(self):\n while True:\n try:\n coords = tuple(map(int, input('y, x = ').split(',')))\n while not self.correct_player_turn(coords):\n coords = tuple(map(int, input('y, x = ').split(','))) # coordinates as a tuple\n return coords\n except ValueError:\n print(\"Oppsy daisy! That's not a corect input! 
'x,y'\")", "def take_turn(state, hint):\n \"\"\" display the current state and the labels for choosing a move\"\"\"\n\n print(state) # print the game board\n print(color_magenta(hint))\n print(\"\") #add a space # print the numbers that correspond to all moves in the game board\n print(color_green(\"Your current score is: \"), color_green(str(state.score1)))\n print(color_green(\"AI's current score is: \"), color_green(str(state.check_finished_boxes() - state.score1))) # record the scores of the player and AI at the moment\n print(\"\") #add a space\n\n move = input(color_yellow(\"Please enter a number to connect: \"))\n\n \"\"\"prompt again for a move until it's a valid input and corresponds to an empty space in the board\"\"\"\n while not move.isnumeric() or not (0 <= int(move) <= 48) or (int(move) % 2 == 0) or state.board[int(move) // 7][int(move) % 7] != \"\":\n move = input(color_yellow(\"Please enter a valid connection: \"))\n number = move\n if number in hint:\n index = hint.find(number)\n if len(number) == 1:\n hint = hint[0:index] + \" \" + hint[index + 1:] # Make the moves player already made disappear\n else:\n hint = hint[0:index] + \" \" + hint[index + 2:]\n\n state.make_move(move)\n return hint", "def move_humans(self, zombie_distance_field):\r\n blocked = self.get_grid_height() * self.get_grid_width() #getting the distance value of obstacles\r\n new_positions = []\r\n for human in self.humans(): #calculate move for each human\r\n moves = self.eight_neighbors(human[0], human[1]) #getting list of up to 8 possible moves\r\n moves.append((human[0], human[1]))\r\n potential_moves = []\r\n distance = zombie_distance_field[human[0]][human[1]]\r\n for move in moves: #storing potential move if the distance is the max but not that of an obstacle\r\n if zombie_distance_field[move[0]][move[1]] < blocked:\r\n if zombie_distance_field[move[0]][move[1]] > distance:\r\n potential_moves = [move]\r\n distance = zombie_distance_field[move[0]][move[1]]\r\n elif zombie_distance_field[move[0]][move[1]] == distance: #getting multiple moves if valid\r\n potential_moves.append(move) \r\n \r\n new_positions.append(random.choice(potential_moves))\r\n self._human_list = new_positions", "def user_guess():\n return list(input(\"What is your guess?\"))", "def attack_input(self):\n while True:\n if self.user == 'player':\n print(\"ITS YOUR TURN TO ATTACK!\\n\")\n try:\n column = input('ENTER DESIRED COLUMN (A-J): \\n').upper()\n if not re.match('^[A-J]*$', column):\n print('PLEASE ENTER A VALID LETTER BETWEEN A-J')\n else:\n column = self.letters_to_numbers[column]\n break\n except KeyError:\n print('PLEASE ENTER A LETTER')\n elif self.user == 'computer guess':\n column = self.comp_attack_column()\n if column == range(0, 10):\n break\n else:\n column = random.randint(0, 9)\n break\n while True:\n if self.user == 'player':\n try:\n row = input('ENTER DESIRED ROW (0-9): \\n')\n if row in self.row_input:\n row = int(row)\n break\n else:\n raise ValueError\n except ValueError:\n print('PLEASE ENTER A VALID NUMBER BETWEEN 0-9')\n elif self.user == 'computer guess':\n row = self.comp_attack_row()\n if row == range(0, 10):\n break\n else:\n row = random.randint(0, 9)\n break\n return column, row", "def guess(word, old_ans):\n life = N_TURNS\n while life > 0:\n guess_ch = input('Your guess: ')\n guess_ch = guess_ch.upper()\n if guess_ch.isalpha() != True or len(guess_ch) != 1:\n print('Illegal format.')\n else:\n ans = ''\n if word.find(guess_ch) == -1:\n # when user doesn't find the right character\n print('There 
is no ' + guess_ch + \"'s in the word.\")\n life -= 1\n life = life\n for ch in word:\n if ch == guess_ch:\n ans += ch\n else:\n ans += '-'\n else:\n # when user make a correct guess that find out the right character of the word\n print('You are correct!')\n for ch in word:\n if ch != guess_ch:\n ans += '-'\n else:\n ans += guess_ch\n new_ans = ''\n for i in range(len(old_ans)):\n # to keep the previous right guess' result\n ch = old_ans[i]\n if ch.isalpha():\n new_ans += ch\n elif ch != ans[i]:\n new_ans += guess_ch\n else:\n new_ans += ch\n old_ans = new_ans\n if old_ans.isalpha():\n # when the user find all characters of the random word ans still alive\n print('You win!!')\n print('The word was: '+word)\n break\n else:\n if life > 0:\n print('The word looks like '+old_ans)\n print('You have '+str(life)+' guesses left.')\n # when the user make wrong guesses and finish all his/her guess opportunities\n if life == 0:\n print('You are completely hung : (')\n print('The word was: '+word)", "def run_single_game(words_list):\r\n word = hangman_helper.get_random_word(words_list) #random word\r\n pattern = len(word)*'_'\r\n wrong_guess_lst= list()\r\n error_count=0\r\n msg= hangman_helper.DEFAULT_MSG\r\n ask_play=False\r\n while error_count < hangman_helper.MAX_ERRORS and '_' in pattern:\r\n hangman_helper.display_state(pattern, error_count, wrong_guess_lst, msg, ask_play)\r\n user_input = hangman_helper.get_input()\r\n does_letter = if_letter(user_input[1]) #if the input is letter or not\r\n if user_input[0] == hangman_helper.HINT:\r\n filter_list= filter_words_list(words_list,pattern,wrong_guess_lst)\r\n filter_1 = choose_letter(filter_list,pattern)\r\n msg = hangman_helper.HINT_MSG+filter_1\r\n else:\r\n if len(user_input[1])!=1 or does_letter==False:\r\n msg= hangman_helper.NON_VALID_MSG\r\n elif user_input[1] in wrong_guess_lst or user_input[1] in pattern:\r\n msg= hangman_helper.ALREADY_CHOSEN_MSG+user_input[1]\r\n elif user_input[1] in word:\r\n pattern = update_word_pattern(word, pattern, user_input[1])\r\n msg = hangman_helper.DEFAULT_MSG\r\n else:\r\n error_count+=1\r\n msg=hangman_helper.DEFAULT_MSG\r\n wrong_guess_lst.append(user_input[1])\r\n if '_' in pattern:\r\n ask_play = True\r\n msg = hangman_helper.LOSS_MSG + word\r\n else:\r\n ask_play = True\r\n msg = hangman_helper.WIN_MSG\r\n hangman_helper.display_state(pattern, error_count, wrong_guess_lst, msg, ask_play)", "def run_game(ans, n):\n # transform to upper case to be case-insensitive\n ans = ans.upper()\n wrong_times = 0\n dashed = \"\"\n for i in range(len(ans)):\n dashed += '-'\n print_hangman(n, wrong_times)\n print('The word looks like: ' + dashed)\n print('You have '+str(n-wrong_times)+' guesses left.')\n while True:\n input_ch = input('Your guess: ')\n # check type of the input, just only one alphabet can be accepted\n if not (input_ch.isalpha() and (len(input_ch) == 1)):\n print('illegal format.')\n else:\n # transform to upper case to be case-insensitive\n input_ch = input_ch.upper()\n # if guessed alphabet was in the answer word\n if ans.find(input_ch) != -1:\n # check the alphabet's index in the word\n for i in range(len(ans)):\n if ans[i] == input_ch:\n # replace the guessed alphabet in the dashed string to show\n dashed = dashed[:i]+ans[i]+dashed[i+1:]\n print_hangman(n, wrong_times)\n print('You are correct!')\n # if alphabets were not all guessed, the while loop will be continued\n if not dashed.isalpha():\n print('The word looks like: ' + dashed)\n print('You have ' + str(n - wrong_times) + ' guesses 
left.')\n # if all alphabets were guessed, the game is over\n else:\n print('You win!')\n print('The word was: ' + ans)\n break\n # if guessed alphabet wasn't in the answer word\n else:\n wrong_times += 1\n # if wrong times haven't reached N_TURNS, the while loop will be continued\n print_hangman(n, wrong_times)\n if wrong_times < n:\n print(\"There's no \" + input_ch + \"'s in the word.\")\n print('The word looks like: ' + dashed)\n print('You have ' + str(n - wrong_times) + ' guesses left.')\n # if user guessed the wrong alphabet at the last time, the game is over\n elif wrong_times == n:\n print(\"There's no \" + input_ch + \"'s in the word.\")\n print('You are completely hung :(')\n print('The word was: ' + ans)\n break", "def minimax(state, depth, player):\n if depth == 9:\n row = choice([0, 1, 2])\n col = choice([0, 1, 2])\n return row, col, ''\n\n if player == COMP:\n best = [-1, -1, float(\"-inf\")]\n else:\n best = [-1, -1, float(\"inf\")]\n\n if depth == 0 or state.has_tic_tac_toe(COMP) or state.has_tic_tac_toe(HUMAN):\n score = heuristic(state, depth)\n return [-1, -1, score]\n \"\"\"\n Checks if any of the player is one away from winning in any board and make the appropriate move.\n \"\"\"\n if player==COMP:\n empty_cells=get_empty_cells(state)\n dangerous_cells=state.is_one_away_from_tic_tac_toe((player%2)+1)\n if dangerous_cells:\n found_dangerous_cells=True\n else:\n found_dangerous_cells=False\n print \"no dangerous local boards\"\n favoring_cells=state.is_one_away_from_tic_tac_toe(player)\n if favoring_cells:\n found_favoring_cells=True\n else:\n found_favoring_cells=False\n print \"no favoring local boards\"\n if found_dangerous_cells==False and found_favoring_cells==False:\n pass\n if found_dangerous_cells==False and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in favoring_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==False:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n else:\n empty_cells=get_empty_cells(state)\n for cell in empty_cells:\n row, col = cell[0], cell[1]\n state.board[row][col] = player\n score = minimax(state, depth - 1, (player % 2) + 1)\n state.board[row][col] = 0\n score[0], score[1] = row, col\n if player == COMP:\n if score[2] >= best[2]:\n if score[2]==best[2]:\n \"\"\"\n Favors middle positions over sides or corners\n MIDDLE > CORNERS > SIDES\n \"\"\"\n if (best[0]==0 and best[1]==0) or (best[0]==0 and best[1]==2) or (best[0]==2 and best[1]==0) or (best[0]==2 and best[1]==2):\n if score[0]==0 and score[1]==0: #favoring centre position over diagonal position\n best=score\n print(\"centre position chosen over diagonal positions\")\n else:\n if ((score[0]==0 and score[1]==1) or (score[0]==1 and score[1]==0) or (score[0]==1 and score[1]==2) or (score[0]==2 and score[1]==1))==0:\n best=score #favoring any position over side position as long as the new position is not a side position too\n print(\"diagonal and centre positions chosen over side positions\")\n else:\n best = score\n else:\n bestMoves=[]\n if score[2] < best[2]:\n best=score\n return best", "def prompt_player(self):\n board = self.draw_board()\n print board\n self.player_moves(self.board_values)", "def _prepare(self):\n self.code = random.randint(1000,9999)\n self.user_guess.append(\"----\")\n self.user_guess.append(\"----\")\n 
self.applied_guess.append(\"****\")\n self.applied_guess.append(\"****\")", "def test_hard_bot_map1(self):\n game = self.bot_test_map1(Difficulty.hard)\n self.assertEqual(game.first_player.ask_for_move(), (0, 11))", "def agent(obs_dict, config_dict):\n global last_move\n observation = Observation(obs_dict)\n configuration = Configuration(config_dict)\n player_index = observation.index\n player_goose = observation.geese[player_index]\n player_head = player_goose[0]\n player_row, player_column = row_col(player_head, configuration.columns)\n possible_moves = [Action.SOUTH.name, Action.NORTH.name, Action.WEST.name, Action.EAST.name]\n possible_move = None\n oldFoodDistanceToMe = 9999\n \n def getFoodsIndex(): \n foods = []\n for i in range(len(observation.food)):\n food = observation.food[i]\n foods.append(row_col(food, configuration.columns))\n return foods\n\n def getGoose(index):\n goose = []\n for i in range(len(observation.geese[index])):\n goose.append(row_col(observation.geese[index][i], configuration.columns))\n return goose\n\n def getGeesesIndex():\n geeses = []\n for geese in observation.geese:\n geeses.append(row_col(geese[0], configuration.columns))\n return geeses\n\n def randomMove(moves):\n if len(moves) > 1:\n return moves[randint(0, len(moves) - 1)]\n return moves[0]\n\n def countMoves(x1, y1, x2, y2):\n return abs((x1-x2)+(y1-y2)) \n\n def willCollide(x, y, action):\n #print(f'Goose[{player_index} in player_row: {player_row}, player_column: {player_column} move to {action} and can collide with x: {x}, y: {y}')\n if action == Action.WEST.name:\n if player_column - 1 == y and x == player_row:\n return True \n if player_column == 0 and y == 10 and x == player_row:\n return True\n if action == Action.EAST.name:\n if player_column + 1 == y and x == player_row:\n return True\n if player_column == 10 and y == 0 and x == player_row:\n return True\n if action == Action.SOUTH.name:\n if player_row + 1 == x and y == player_column:\n return True\n if player_row == 6 and x == 0 and y == player_column:\n return True\n if action == Action.NORTH.name:\n if player_row - 1 == x and y == player_column:\n return True\n if player_row == 0 and x == 6 and y == player_column:\n return True\n return False\n\n def opposite(action):\n if action == Action.NORTH.name:\n return Action.SOUTH.name\n if action == Action.SOUTH.name:\n return Action.NORTH.name\n if action == Action.EAST.name:\n return Action.WEST.name\n if action == Action.WEST.name:\n return Action.EAST.name\n\n possible_moves = [Action.SOUTH.name, Action.NORTH.name, Action.WEST.name, Action.EAST.name]\n\n if last_move[player_index] is not None:\n print(f'Geese {player_index} removes opposite last move: {opposite(last_move[player_index])}')\n possible_moves.remove(opposite(last_move[player_index]))\n\n for geese_row, geese_column in getGeesesIndex():\n for food_row, food_column in getFoodsIndex():\n foodDistanceToMe = countMoves(food_row, food_column, player_row, player_column)\n if oldFoodDistanceToMe > foodDistanceToMe:\n oldFoodDistanceToMe = foodDistanceToMe\n else:\n continue\n \n geeseDistanceToFood = countMoves(geese_row, geese_column, food_row, food_column)\n if foodDistanceToMe < geeseDistanceToFood:\n if food_row > player_row:\n possible_move = Action.SOUTH.name\n\n if food_row < player_row:\n possible_move = Action.NORTH.name\n\n if food_column > player_column:\n possible_move = Action.EAST.name\n\n if food_column < player_column:\n possible_move = Action.WEST.name\n \n print(f'Geese {player_index} based by food, possible move: 
{possible_move}')\n if possible_move not in possible_moves:\n print(f'Geese {player_index}, possible move: {possible_move} is banned.')\n possible_move = randomMove(possible_moves)\n\n for i in range(len(observation.geese)):\n geese_index = getGoose(i)\n j = 0\n while j < len(geese_index):\n collision = False\n x, y = geese_index[j]\n if i != player_index:\n print(f'geese {player_index} -> geese {i} part {j}')\n #print(f'goose[{player_index}] in {getGoose(player_index)[0]} with possible move: {possible_move} can colide with goose[{i}] in x: {x} and y: {y}')\n while willCollide(x, y, possible_move):\n collision = True\n print(f'goose[{player_index}] in {getGoose(player_index)[0]} with possible move: {possible_move} will colide with goose[{i}] in x: {x} and y: {y}')\n possible_moves.remove(possible_move)\n possible_move = randomMove(possible_moves)\n print(f'now goose[{player_index}] will to {possible_move}')\n j = 0\n if not collision:\n j += 1\n\n for x, y in getGoose(player_index):\n while willCollide(x, y, possible_move):\n print(f'BODY HIT!!!')\n possible_moves.remove(possible_move)\n possible_move = randomMove(possible_moves)\n\n if len(possible_moves) == 1:\n print(f'Geese {player_index} just remain this move: {possible_moves[0]}')\n possible_move = possible_moves[0]\n\n last_move[player_index] = possible_move\n\n print(f'Geese {player_index}: {getGoose(player_index)} move to {possible_move}')\n return possible_move", "def main():\n # initial condition\n ans = random_word()\n remaining_guess_num = N_TURNS\n guess_word = ''\n for i in range(len(ans)):\n guess_word += '-'\n\n # start to play hangman game\n while (remaining_guess_num > 0) and (guess_word != ans):\n print('The word looks like: ' + str(guess_word))\n print('You have ' + str(remaining_guess_num) + ' guesses left.')\n input_ch = str(input('Your guess: '))\n\n # illegal format\n if not input_ch.isalpha():\n print('illegal format.')\n elif len(input_ch) != 1:\n print('illegal format.')\n # correct format\n else:\n # case-insensitive\n input_ch = input_ch.upper()\n # wrong guess\n if ans.find(input_ch) == -1:\n print('There is no ' + str(input_ch) + '\\'s in the word.')\n remaining_guess_num -= 1\n # correct guess\n else:\n print('You are correct!')\n ans_slice = ans\n # replace all the correct guessed letter(s)\n while ans_slice.find(input_ch) != -1:\n replace_loc = len(ans) - len(ans_slice) + ans_slice.find(input_ch)\n guess_word = replace_letter(input_ch, replace_loc, guess_word)\n ans_slice = ans_slice[ans_slice.find(input_ch)+1:]\n # win\n if guess_word == ans:\n print('You win!!')\n # lose\n else:\n print('You are completely hung : (')\n print('The word was: ' + str(ans))", "def human_attack():\n while True:\n print_board(user_board)\n coordinate = (raw_input(\"Enter the coordinate you would like to attack (i.e. 'A1'): \")).upper()\n row, col = coordinate[0], coordinate[1:]\n if row in row_label and col in col_label:\n row_index = row_label.index(row)\n col_index = col_label.index(col) \n if user_board[row_index][col_index] == \"-\":\n response = get_response(coordinate)\n if response == \"H\":\n print \"\\nComputer: Hit. 
Lucky shot.\\n\"\n statistics['hits'] += 1\n statistics['miss_streak'] = 0\n statistics['total_guesses'] += 1\n if statistics['prev_guess'] == \"H\" or statistics['prev_guess'] == \"S\":\n statistics['hit_streak'] +=1\n if statistics['hit_streak'] > statistics['biggest_hit_streak']:\n statistics['biggest_hit_streak'] = statistics['hit_streak']\n statistics['prev_guess'] = \"H\"\n break\n # Update statistics and gameplay for a miss\n elif response == \"M\":\n print \"\\nComputer: HAH! YOU MISSED!! \\n\"\n statistics['misses'] += 1\n statistics['total_guesses'] += 1\n statistics['hit_streak'] = 0\n if statistics['prev_guess'] == \"M\":\n statistics['miss_streak'] +=1\n if statistics['miss_streak'] > statistics['biggest_miss_streak']:\n statistics['biggest_miss_streak'] = statistics['miss_streak']\n statistics['prev_guess'] = \"M\"\n break\n # Update statistics and gameplay when ship is sunk\n elif response == \"S\":\n print ship_info[board[row_index][col_index]]['name'] + \" destroyed!\"\n print \"Computer: You got that one, but you won't get the rest!!\\n\"\n statistics['hits'] += 1\n statistics['miss_streak'] = 0\n statistics['total_guesses'] += 1\n statistics['ships_destroyed'] += 1\n if statistics['prev_guess'] == \"H\" or statistics['prev_guess'] == \"S\":\n statistics['hit_streak'] +=1\n if statistics['hit_streak'] > statistics['biggest_hit_streak']:\n statistics['biggest_hit_streak'] = statistics['hit_streak']\n statistics['prev_guess'] = \"S\"\n break\n else:\n print \"Response returned bad data\"\n else:\n print \"You already guessed there! Try somewhere else.\\n\"\n else:\n print \"Please enter a valid coordinate.\\n\"", "def hangman_figure(attempt_left):\n if attempt_left == N_TURNS:\n print('___________')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 1:\n print('___________')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 2:\n print('___________')\n print('| |')\n print('| O')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 3:\n print('___________')\n print('| |')\n print('| O')\n print('| |')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 4:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 5:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 6:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| /')\n print('| |')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 7:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 8:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| |')\n print('|_____')\n if attempt_left == N_TURNS - 9:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 10:\n print('___________')\n 
print('| |')\n print('| -O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 11:\n print('___________')\n print('| |')\n print('| -O-')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')", "def AI(current_board, AI_symbol, opponent_symbol, difficulty): #Written by Cody West\n victory_conditions = [[0,4,8],[2,4,6],[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8]] #Establishes victory conditions to be checked\n if difficulty >= 2: #If difficulty is at least 2\n ## Cody -- you could just write:\n ## for slots in victory_conditions\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions ## Oops\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list ## Oops \n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n ## This you can do even more efficiently using a beautiful syntax called\n ## \"list comprehension\" which entered python some years ago -- watch\n ## me do it in one line:\n ## check = [current_board[s] for s in slots]\n if check.count(AI_symbol)==2 and check.count(\" \")==1: #If there are any rows where the AI has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n ## Oops -- you repeat the code again here for no reason\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(opponent_symbol)==2 and check.count(\" \")==1: #If there are any rows where the opponent has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n if difficulty >= 3: #If difficulty is at least 3\n ## It looks like you're doing an identical loop here -- I\n ## wonder why you don't move the if statement inside the loop\n ## -- I believe that would significantly shorten your code\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(AI_symbol)==1 and check.count(\" \")==2: #If there are any rows where the AI has one symbol and there's two empty spots\n if check[0] == \" \": #If the first slot from check is empty\n return(slots[0]) #Return the first slot\n else: \n return(slots[2]) #Return the third slot\n if difficulty == 4: #If difficulty is 4\n if current_board[4] == \" \": #If the center is empty\n return(4) #Take the center\n elif current_board[0] or current_board[2] or current_board[6] or current_board[8] == \" \": #Else, if a corner is open\n corners = 2*random.randint(0,4) #Selects a random corner (or center, which will reject)\n while current_board[corners] != \" \": #Until the corner selected is empty\n corners = 2*random.randint(0,4) #Select a new 
corner or center\n return(corners) #Return empty corner\n else:\n sides = 2*random.randint(0,3)+1 #Selects a side\n while current_board[sides] != \" \": #Until the side is empty\n sides = 2*random.randint(0,3)+1 #Selects a new side\n return(sides) #Returns empty side\n if difficulty < 4: #If difficulty is less than 4\n ran = random.randint(0,8) #Picks random spot on board\n while current_board[ran] != \" \": #Until the spot is empty\n ran = random.randint(0,8) #Picks a new spot\n return(ran) #Returns empty spot", "def Missing_Atoms(molecules, experiment = None):\n # Validate the Molecules\n molecules, gn = validate_molecules(molecules)\n # Declare the procedure, making sure it is OK (it is, but whatever)\n procedure = validate_procedure(\"add_missing_atoms\")\n # Determine who is running this experiment\n try:\n user = experiment[\"User\"]\n except (KeyError, TypeError, AttributeError, IPRO_Error):\n user = defaultUser\n # Create the CHARMM script\n script = introduction(\"Add missing Atoms to Molecules.\")\n script += load_input_files(experiment)\n script += load_molecules(molecules, procedure, user, \"all\")\n script += ic_fill() \n script += output_molecules(molecules, procedure, \"all\") + \"stop\\n\"\n # Run CHARMM\n input, output = execute_CHARMM_script(script, procedure, gn)\n # Load the new structures of the Molecules\n load_structures(molecules, procedure, \"all\")\n # Clean up after the procedure\n #clean_up(molecules, input, output, procedure)", "def play_a_game(strategy, word):\n guesses = []\n state_of_play = \"\"\n for i in range(len(word)):\n state_of_play = state_of_play + \" \"\n print(state_of_play)\n while state_of_play != word:\n letter = strategy.play_round(state_of_play,guesses)\n guesses.append(letter)\n print(letter)\n guess_success = False\n for i in range(len(word)):\n if letter == word[i]:\n state_of_play = state_of_play[0:i] + letter + state_of_play[i+1:]\n print(\"State of game: \" + state_of_play)\n guess_success = True\n if not guess_success:\n strategy.made_mistake()\n return strategy.mistakes", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n print \"I am here!!!\"\n return [s, s, w, s, w, w, s, w]", "def take_turn(self, opponent):\n\n # --------- BEGIN YOUR CODE ----------\n\n # 1.) Guess a random space that has not been guessed (or be more clever!)\n\n # Steps 2-4 are the same as Human.take_turn\n\n # 2.) Call opponent.guess() to check whether the guess is a hit or miss\n\n # 3.) Update my_hits, my_misses, and sunk_ships accordingly\n\n # 4.) 
If the sunk_ships array has 5 ships in it set self.complete to True\n\n # --------- END YOUR CODE ----------\n\n # enforce a short delay to make the computer appear to \"think\" about its guess\n time.sleep(0.5)", "def play_board(bots, n_iter=25, grid_size=5, sleep_time=0.3):\n \n food_icon = chr(1160)\n counter = grid_size * 4\n food_list = []\n # finds positions for food \n while counter is not 0:\n food_list.append([random.randrange(grid_size),\n random.randrange(grid_size)])\n counter -= 1\n \n # If input is a single bot, put it in a list so that procedures work\n if not isinstance(bots, list):\n bots = [bots]\n \n # Update each bot to know about the grid_size they are on\n for bot in bots:\n bot.grid_size = grid_size\n\n for it in range(n_iter):\n\n # Create the grid\n grid_list = [['.'] * grid_size for ncols in range(grid_size)]\n \n # bots will eat food if in same location\n eat(bots, food_list)\n \n # Add bot(s) to the grid\n for bot in bots:\n grid_list[bot.position[0]][bot.position[1]] = bot.character \n \n # Add food to the grid\n for food_loc in food_list:\n grid_list[food_loc[0]][food_loc[1]] = food_icon\n\n \n # Clear the previous iteration, print the new grid, and wait\n clear_output(True)\n print('\\n'.join([' '.join(lst) for lst in grid_list]))\n sleep(sleep_time)\n\n # Update bot position(s) for next turn\n for bot in bots:\n bot.move()", "def get_possible_actions(self, world_state,agent_host):\n action_list = []\n possibilities = {'movenorth 1': -3,'movesouth 1': 3,'moveeast 1': 1,'movewest 1': -1}\n #check walls to see whether can move left,right,back,forward\n #check floor beneath to see whether should do anything at all, or just nothing and sink\n obs_text = world_state.observations[-1].text\n obs = json.loads(obs_text)\n grid = load_grid(world_state)\n print 'GRID SIZE: ', len(grid)\n for k,v in possibilities.items():\n #with current grid, index 31 will always be our agent's current location\n #check walls to see whether can move left,right,back,forward\n if grid[31+v+9] == 'water' or grid[31+v+9] == 'wooden_door': #+9 because we want to check\n action_list.append(k) #where our feet are located\n #check if you can teleport down a level\n if grid[31-27] == 'water' or grid[31-27] == 'wooden_door':\n action_list.append(self.teleport(agent_host,False))\n #check if you can teleport up a level\n if grid[31+45] == 'water' or grid[31+45] == 'wooden_door':\n action_list.append(self.teleport(agent_host,True))\n\n print(\"ACTION LIST: {}\".format(action_list))\n return action_list", "async def _guess(self, ctx):\n reply = '\\n'\n for i, entry in enumerate(db.get_leaderboard(\n ctx.message.server.id,\n 'guess-leaderboard')):\n for key, value in entry.items():\n if key == \"discord_id\":\n name = self.get_name(ctx, value)\n elif key == 'date':\n date = value\n else:\n score = value\n reply += '{}. 
{} - {} ({})\\n'.format(\n i+1,\n score,\n name,\n datetime.datetime.fromtimestamp(\n int(date)).strftime('%d-%m-%Y')\n )\n await self.bot.say(reply)", "def play_against_minimax():\n global FIRST_MOVE\n global done\n done = False\n g = Game()\n turn = np.random.randint(2)\n # if turn == RED:\n # FIRST_MOVE = False\n transitions_agent = []\n agent.epsilon = agent.eps_min\n while done == False:\n g.printBoard()\n # print(g.board)\n if turn == PLAYER:\n row = input('{}\\'s turn:'.format('Red'))\n g.insert(int(row), PLAYER_PIECE)\n else:\n observation = []\n obs = np.zeros((6, 7))\n for row, sublist in enumerate(g.board):\n for col, i in enumerate(sublist):\n observation.append(i)\n obs[col, row] = i\n\n observation = np.asarray(observation)\n action, _ = minimax(np.flipud(obs), 5, -math.inf, math.inf, True)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = (turn + 1) % 2\n return", "def input_coordinates(playing_field, playground_mines):\n while True:\n try:\n x = input('Write number of line from 0 to %s:' % (FIELD_SIZE - 1))\n if x == 'test':\n test_game(playing_field, playground_mines)\n y = input('Write number of line from 0 to %s:' % (FIELD_SIZE - 1))\n if y == 'test':\n test_game(playing_field, playground_mines)\n elif not is_coords_in_range(int(x), int(y)):\n raise TypeError\n return int(x), int(y)\n except ValueError:\n print('Wrong input, try again')\n except TypeError:\n print('Your number of coordinate is out of field')", "def enterGuess(self):\n validPattern = False\n while not validPattern:\n print # intentional blank line\n prompt = 'Enter a guess (colors are '\n prompt += self._palette[:self._numColorsInUse] + '): '\n patternString = raw_input(prompt)\n \n validPattern = True\n if len(patternString) != self._lengthOfPattern:\n print 'The pattern must have', self._lengthOfPattern, 'pegs'\n validPattern = False\n else:\n for i in range(self._lengthOfPattern):\n if patternString[i].upper() not in self._palette[:self._numColorsInUse]:\n validPattern = False\n if not validPattern:\n print 'The color options are', self._palette[:self._numColorsInUse]\n \n if validPattern:\n pattern = Pattern(self._lengthOfPattern)\n for i in range(self._lengthOfPattern):\n pattern.setPegColor(i, self._palette.index(patternString[i].upper()))\n\n return pattern", "def play_hangman(self):\n while self.stage < 6:\n self.display_hangman()\n guess = input(f'{Fore.YELLOW}Choose a letter: {Style.RESET_ALL}').lower().strip() # noqa\n print('\\n')\n if guess.isalpha() and len(guess) == 1:\n if guess not in self.word:\n if guess in self.guessed_letters:\n print(f'You already guessed {guess}, try again')\n print('\\n')\n else:\n print(f'{Fore.RED}{guess} is not in the word, try again{Style.RESET_ALL}') # noqa\n print('\\n')\n self.stage += 1\n self.guessed_letters.append(guess)\n elif guess.isalpha() and guess in self.word:\n if guess in self.guessed_letters:\n print(f'You already guessed {guess}, try again')\n print('\\n')\n else:\n print(f'{Fore.GREEN}{guess} is in the word!{Style.RESET_ALL}') # 
noqa\n print('\\n')\n self.guessed_letters.append(guess)\n # code for replacing dashes with letters adapted from # noqa\n # https://github.com/kiteco/python-youtube-code/blob/master/build-hangman-in-python/hangman.py\n word_as_list = list(self.progress)\n indices = [i for i, letter in enumerate(self.word) if letter == guess] # noqa\n for index in indices:\n word_as_list[index] = guess\n self.progress = \"\".join(word_as_list)\n if \"-\" not in self.progress:\n print(f'{Fore.GREEN}Congrats! You correctly guessed the answer: {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.games_won += 1\n break\n\n elif guess.isalpha() and guess == self.word:\n print(f'{Fore.GREEN}Congrats! You correctly guessed the answer: {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.games_won += 1\n break\n\n elif guess.isalpha() and guess not in self.word and guess in self.guessed_words: # noqa\n print(f'You already guessed {guess}, try again')\n print('\\n')\n\n elif guess.isalpha() and guess not in self.word and guess not in self.guessed_words: # noqa\n print(f'{Fore.RED}{guess} is not the word, try again{Style.RESET_ALL}') # noqa\n print('\\n')\n self.stage += 1\n self.guessed_words.append(guess)\n print('\\n')\n else:\n print('Invalid input \\n')\n if self.stage >= 6:\n print(Fore.CYAN + HANGMAN_PICS[self.stage])\n print('\\n')\n print(f'{Fore.RED}Game Over! The word was {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.play_again()", "def try_to_guess(word):\n\n # set number of tries based on word length\n if 4 < len(word) < 7:\n tries = 4\n elif 7 < len(word) < 12:\n tries = 8\n else:\n tries = 12\n \n # create placeholder word eg: ---\n placeholder = ['-' for _ in range(len(word))]\n \n # list to check if letter was already guessed\n guesses = []\n\n while tries > 0:\n print('\\n' + ''.join(placeholder))\n letter = str(input(f\"Input a letter: \"))\n\n # only one lower case alphanum character\n if len(letter) > 1:\n print(\"You should input a single letter\")\n elif not letter.isalnum() or not letter.islower():\n print(\"It is not an ASCII lowercase letter\")\n \n elif letter in guesses:\n print(\"You already typed this letter\") \n elif letter not in word:\n print(\"No such letter in the word\")\n tries -= 1\n \n # we have a good letter\n else:\n for i, v in enumerate(word):\n \n if v == letter:\n placeholder[i] = letter\n \n if ''.join(placeholder) == word:\n print()\n print(''.join(placeholder))\n print(\"You guessed the word!\\nYou survived!\")\n return\n \n guesses.append(letter)\n \n else:\n print(\"You lost!\")\n print(f\"The word was {word}\")", "def registerInitialState(self, gameState):\n\n # stuff\n self.treeDepth = 4\n self.oldFood = []\n self.lastEatenFood = None\n self.i = 0\n\n\n\n #oldFood\n self.oldFood = self.getFoodYouAreDefending(gameState)\n\n\n self.red = gameState.isOnRedTeam(self.index)\n self.distancer = distanceCalculator.Distancer(gameState.data.layout)\n\n # comment this out to forgo maze distance computation and use manhattan distances\n self.distancer.getMazeDistances()\n\n\n\n \n\n \n # FIND PATROL POINTS\n\n\n\n x = gameState.data.layout.width/2-8\n #print \"WIDTH \", x+4\n\n y1 = gameState.data.layout.height-4\n y2 = 0+4\n\n\n\n point1 = (x,y2)\n point2 = (x,y1)\n topPoints = []\n botPoints = []\n for i in range(0,6):\n xv = x+i\n if not gameState.data.layout.walls[xv][y1]:\n\n newBP = (xv, y1)\n botPoints.append(newBP)\n else:\n newBP = (xv, y1)\n #print newBP, \" in wall\"\n\n if not gameState.data.layout.walls[xv][y2]:\n newTP = (xv, y2)\n 
topPoints.append(newTP)\n else:\n newTP = (xv, y2)\n #print newTP, \" in wall\"\n\n\n\n\n\n # FIND PATROL POINTS WITH THE SHORTEST PATH\n bestTP = topPoints[0]\n bestBP = botPoints[0]\n\n bestPath = self.getMazeDistance(bestTP,bestBP)\n for tp in topPoints:\n bp = min(botPoints, key=lambda p: self.getMazeDistance(tp, p))\n tempPath = self.getMazeDistance(tp, bp)\n if (tempPath < bestPath):\n bestTP = tp\n bestBP = bp\n bestPath = tempPath\n\n #print \"THE REAL BEST POINTS: \", bestBP, \" \", bestTP, \" \", bestPath\n\n self.patrolPoints = [bestTP,bestBP]\n\n\n\n\n\n\n import __main__\n if '_display' in dir(__main__):\n self.display = __main__._display", "def move(self, board, player_mark='o'):\n # First things first, let's check if the board is full first before we\n # make a move\n full = 1\n for location in board.keys():\n if board[location] == '-':\n full = 0\n\n if not full:\n # Storm Spirit is a dumb yet aggressive AI, so he does not need to\n # check whether the opponent has created a line.\n\n # Initialize a move variable that determines the location that the\n # AI will mark.\n move = ''\n\n # Let's see if there are any potential lines that we can form,\n # then mark the location that would finish that line.\n print('Searching for potential lines...')\n move = self.find_line_attempt(board, 'x')\n\n if(move == ''):\n print('No potential lines found. Marking random location.')\n # Initialize a boolean variable that tracks whether we have\n # marked a location or not.\n marked = 0\n while not marked:\n location = random.randint(1,9)\n\n # The location will have to be empty\n if(location == 1 and board['topleft'] == '-'):\n marked = 1\n print('Marking topleft location\\n')\n elif(location == 2 and board['topcenter'] == '-'):\n marked = 1\n print('Marking topcenter location\\n')\n elif(location == 3 and board['topright'] == '-'):\n marked = 1\n print('Marking topright location\\n')\n elif(location == 4 and board['middleleft'] == '-'):\n marked = 1\n print('Marking middleleft location\\n')\n elif(location == 5 and board['middlecenter'] == '-'):\n marked = 1\n print('Marking middlecenter location\\n')\n elif(location == 6 and board['middleright'] == '-'):\n marked = 1\n print('Marking middleright location\\n')\n elif(location == 7 and board['bottomleft'] == '-'):\n marked = 1\n print('Marking bottomleft location\\n')\n elif(location == 8 and board['bottomcenter'] == '-'):\n marked = 1\n print('Marking bottomcenter location\\n')\n elif(location == 9 and board['bottomright'] == '-'):\n marked = 1\n print('Marking bottomright location\\n')\n else:\n # There are no more locations to mark, but set marked to\n # true anyway\n print('No empty spaces found! 
Re-rolling')\n # Mark the location chosen\n if(location == 1):\n board['topleft'] = self.mark\n elif(location == 2):\n board['topcenter'] = self.mark\n elif(location == 3):\n board['topright'] = self.mark\n elif(location == 4):\n board['middleleft'] = self.mark\n elif(location == 5):\n board['middlecenter'] = self.mark\n elif(location == 6):\n board['middleright'] = self.mark\n elif(location == 7):\n board['bottomleft'] = self.mark\n elif(location == 8):\n board['bottomcenter'] = self.mark\n elif(location == 9):\n board['bottomright'] = self.mark\n else:\n # We found a line attempt, let's mark the finishing location\n board[move] = self.mark\n print('Marked location at ' + move)", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n\n return [s, s, n, s, w, s, w, w, s, w]", "def getGameState(self):\n ### Student code goes here\n #print(\":::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\")\n row1_tuple = ()\n row1_list = {}\n ask1 = parse_input(\"fact: (on ?X ?Y pos1)\")\n answer1 = self.kb.kb_ask(ask1)\n if answer1 != False:\n for ans in answer1.list_of_bindings:\n tile = ans[0].bindings[0].constant.element\n if len(tile.split('tile',1)) > 1:\n tile = int(tile.split('tile',1)[1])\n else:\n tile = -1\n pos = (ans[0].bindings[1].constant.element).split('pos',1)[1]\n row1_list[int(pos)] = tile\n #print(\"ROW1: \", len(row1_list))\n for i in range(len(row1_list)):\n val = row1_list[i+1]\n #print(val)\n row1_tuple = row1_tuple + (val,)\n\n row2_tuple = ()\n row2_list = {}\n ask2 = parse_input(\"fact: (on ?X ?Y pos2)\")\n answer2 = self.kb.kb_ask(ask2)\n if answer2 != False:\n for ans in answer2.list_of_bindings:\n tile = ans[0].bindings[0].constant.element\n if len(tile.split('tile',1)) > 1:\n tile = int(tile.split('tile',1)[1])\n else:\n tile = -1\n pos = (ans[0].bindings[1].constant.element).split('pos',1)[1]\n row2_list[int(pos)] = tile\n #print(\"ROW2: \", len(row2_list))\n for i in range(len(row2_list)):\n val = row2_list[i+1]\n row2_tuple = row2_tuple + (val,)\n\n row3_tuple = ()\n row3_list = {}\n ask3 = parse_input(\"fact: (on ?X ?Y pos3)\")\n answer3 = self.kb.kb_ask(ask3)\n if answer3 != False:\n for ans in answer3.list_of_bindings:\n tile = ans[0].bindings[0].constant.element\n if len(tile.split('tile',1)) > 1:\n tile = int(tile.split('tile',1)[1])\n else:\n tile = -1\n pos = (ans[0].bindings[1].constant.element).split('pos',1)[1]\n row3_list[int(pos)] = tile\n #print(\"ROW3: \", len(row3_list))\n for i in range(len(row3_list)):\n val = row3_list[i+1]\n row3_tuple = row3_tuple + (val,)\n #print(\"-----------------------------------------------------------------------------------------------\")\n\n\n state_tuple = (row1_tuple,row2_tuple,row3_tuple)\n #print(state_tuple)\n return state_tuple", "def __generate_atoms__(self, pdb):\n\n atoms = [] # Maybe this can be a set \n # TODO: Here I can use self.structure.header['missing_residues'] to get a list of residues. It will have their seq and I can use this to make a sequential index\n for model in self.structure:\n residues = model.get_residues() # Biopython \n for residue in residues:\n full_id = residue.get_full_id()\n ins_code = full_id[3][2] \n this_model = str(int(full_id[1]) + 1) # BioPython starts at 0 and fr3d-python starts at 1. 
Add 1 to each model so unit ids match\n this_chain = full_id[2]\n component_number = full_id[3][1]\n if 'H' in full_id[3][0][0]:\n res_group = 'HETATM'\n else:\n res_group = 'ATOM'\n\n res = residue.get_resname().replace(\" \",\"\")\n\n if ins_code == \" \":\n ins_code = None\n\n for atom in residue:\n #drop numbers \n id = atom.id \n id = re.sub(r'\\d+', '',id)\n first = id[0]\n # logic to extract the type of atom from the id\n if 'C' == first: #Carbon\n atom_type = 'C' \n elif 'O' == first: #Ox\n atom_type = 'O'\n elif 'P' == first: #Phosphorus\n atom_type = 'P'\n elif 'N' == first: # nitrogen\n atom_type = 'N'\n else: #Magnesium, other ions\n atom_type = atom.id\n\n x = atom.coord[0]\n y = atom.coord[1]\n z = atom.coord[2]\n \n alt_id = atom.get_altloc()\n if alt_id == \" \":\n alt_id = None\n atoms.append(Atom(x=x, y=y, z=z,\n pdb=self.name,\n model=this_model,\n chain=this_chain,\n component_id=res,\n component_number=component_number,\n component_index=component_number,\n insertion_code=ins_code,\n alt_id= alt_id,\n group=res_group,\n type=atom_type,\n name=atom.get_name(),\n symmetry='1_555', #I haven't figured out how to extract symmetries from pdb files yet. Resort to identity\n polymeric=True)) # Need to find a way to parse this from biopython. Important, may be relevent in structures.py\n return atoms", "def main():\n\n game = JanggiGame()\n game.display_board()\n print(game.make_move('a7','a6'))\n print(game.make_move('h1','g3'))\n print(game.make_move('a10','a7'))\n print(game.make_move('b1','d4'))\n print(game.make_move('a7','b7'))\n print(game.make_move('c1','a2'))\n print(game.make_move('b7','b3'))\n print(game.make_move('h3','b3'))\n print(game.make_move('e7','e6'))\n print(game.make_move('i1','h1'))\n print(game.make_move('i7','h7'))\n print(game.make_move('a2','b4'))\n print(game.make_move('b10','d7'))\n print(game.make_move('b4','a6'))\n print(game.make_move('i10','i9'))\n print(game.make_move('a6','b8'))\n print(game.make_move('c10','b8'))\n print(game.make_move('b3','b9'))\n print(game.make_move('i9','i6'))\n print(game.make_move('a1','b1'))\n print(game.make_move('b8','c6'))\n print(game.make_move('b1','b8'))\n print(game.make_move('h8','h1'))\n print(game.make_move('g3','h1'))\n print(game.make_move('e6','d6'))\n print(game.make_move('h1','g3'))\n print(game.make_move('d6','d5'))\n print(game.make_move('d4','b1'))\n print(game.make_move('i6','e6'))\n print(game.make_move('i4','i5'))\n print(game.make_move('c6','d4'))\n print(game.make_move('c4','d4'))\n print(game.make_move('d5','d4'))\n print(game.make_move('g4','f4'))\n print(game.make_move('d4','e4'))\n print(game.make_move('f4','e4'))\n print(game.make_move('e6','e4'))\n print(game.make_move('g3','e4'))\n print(game.make_move('h10','i8'))\n print(game.make_move('e4','f6'))\n print(game.make_move('g7','g6'))\n print(game.make_move('b1','d4'))\n print(game.make_move('g6','f6'))\n print(game.make_move('d4','f7'))\n print(game.make_move('d7','f4'))\n print(game.make_move('f7','c9'))\n print(game.make_move('d10','d9'))\n print(game.make_move('a4','a5'))\n print(game.make_move('f6','f5'))\n print(game.make_move('g1','e4'))\n print(game.make_move('c7','c6'))\n print(game.make_move('b8','i8'))\n print(game.make_move('f5','e5'))\n print(game.make_move('e4','g1'))\n print(game.make_move('e5','d5'))\n print(game.make_move('i8','i9'))\n print(game.make_move('f10','f9'))\n print(game.make_move('a5','a6'))\n print(game.make_move('d5','d4'))\n print(game.make_move('a6','a7'))\n print(game.make_move('d4','d3'))\n 
print(game.make_move('e2','d3'))\n print(game.make_move('e9','e8'))\n print(game.make_move('i9','f9'))\n print(game.make_move('h7','h6'))\n print(game.make_move('a7','b7'))\n print(game.make_move('g10','e7'))\n print(game.make_move('f9','f7'))\n print(game.make_move('d9','d10'))\n print(game.make_move('f7','e7'))\n print(game.make_move('e8','f8'))\n print(game.make_move('b7','c7'))\n print(game.make_move('h6','h5'))\n print(game.make_move('e7','e10'))\n print(game.make_move('h5','h4'))\n print(game.make_move('c7','d7'))\n print(game.make_move('h4','h3'))\n print(game.make_move('d7','e7'))\n print(game.make_move('h3','h2'))\n print(game.make_move('e7','f7'))\n game.display_board()\n print('Red in check: '+str(game.is_in_check('red')))\n print('Blue in check: '+str(game.is_in_check('blue')))\n print(game.get_game_state())", "def make_move(self): \n if self.counter == 0:\n #AI makes a random move to start\n ai_move = random.randrange(0,((self.size[0] * self.size[1]) - 1)) \n \n #Number to coordinate conversion\n row = ai_move % self.size[0]\n column = ai_move % self.size[0]\n self.start_game((row, column))\n self.counter = 1\n\n if (self.board[(row, column)] == 'm'):\n #print() \"\\n\", \"First move RIP!, what are the odds...\"\n self.found_mine()\n self.gameover = 1\n \n else:\n row, column = self.find_move()\n \n #0.25 second wait \n #time.sleep(0.25)\n\n #Prints out to the terminal the move and type of move\n print(row, \",\", column)\n\n #Updates the GUI\n root.update()\n \n if (self.board[(row, column)] == 'm'):\n print(\"RIP!\") \n self.found_mine() \n self.gameover = 1\n \n elif self.board[(row, column)] == '0':\n print(\"No mines in sight\") \n self.found_space((row, column))\n\n elif self.board[(row, column)] == '1':\n print(\"There is 1 mine next to this spot\") \n self.found_border((row, column))\n else:\n print(\"There are\", self.board[(row, column)], \"mines next to this spot\") \n self.found_border((row, column))", "def guess(self):\n\t\t\n\t\tpeg_guess_color_list = []\n\t\tguess_input = self.view.input_guess()\n\n\t\t# Convert guess_input into a list- each color being a string\n\t\tguess_color_list = re.split(\",\", guess_input)\n\t\t\n\n\t\tfor each_color in guess_color_list:\n\n\t\t\t#associate each string with a peg object\n\t\t\tpeg_guess = ColorPeg(each_color)\n\t\t\t\n\t\t\t# Append the peg_guess color list to make a list of peg guess objects\n\t\t\tpeg_guess_color_list.append(peg_guess)\n\n\t\t\t# Plug our peg objects into our guess object\n\t\t\tuser_guess = Guess(peg_guess_color_list)\n\n\t\t\t# Store guess object in our MasterModel\n\t\t\tself.model.guesses[self.model.status] = user_guess\n\n\t\t\t# Make a variable that\n\n\n\t\t# ### TESTS ###\n\t\t# print (\"This is each color: \", each_color)\n\t\t# print (\"print guess input again: \", guess_input)\n\t\t# print(\"prints each peg color for guess: \", peg_guess)\n\t\t# print(\"Prints the list of color guesses: \", peg_guess_color_list)\n\t\t# for peg_guess in peg_guess_color_list:\n\t\t# \tprint(\"Prints the list of guess pegs: \", peg_guess.peg_color)\n\n\t\t# print(\"Prints out the first list of guesses. Key = Guess 1\", self.model.guesses[\"Guess 1\"])", "def guess_the_number():\n\n print(\"Welcome to no.guessing game . You have 10 trials . 
Good luck\")\n global player\n print(f\"Player{player}'s turn : \")\n\n a = int(input(\"Enter the starting of the range:\\n\"))\n b = int(input(\"Enter the ending of the range:\\n\"))\n from random import randint\n # Generates a random number between the given range\n random_number = randint(a, b)\n global trials\n while trials <= 10:\n\n n = int(input(\"Guess a number:\\n\")) # User's number\n\n if n > random_number:\n print(\"Wrong ! Please enter a lesser number:\")\n\n elif n < random_number:\n print(\"Wrong! Please enter a greater number:\")\n else:\n print(\"Yeah ! you won \")\n print(F\"player{player} won the game in {trials} no. of trials\")\n break\n print(f\"{10-trials} no. of trials left\")\n trials += 1\n if trials>10:\n print(f\"GAME OVER! the number was {random_number}\")\n # creating player 1's and player 2's points in the global scope\n if player == 1:\n global player_point1\n player_point1 = trials\n\n else:\n global player_point2\n player_point2 = trials", "def play_game(n):\n tries = 0\n magic_number = generate_random(n)\n print(\"Let's play the mimsmind0 game.\")\n # Get and validate user's first guess\n while True:\n try:\n guess = int(input(\"Guess a {}-digit number: \".format(n)))\n tries += 1\n break\n except:\n print(\"That is not a valid number, try again.\") \n while True:\n # Check guess against magic number and give directional guidance if incorrect\n try:\n if magic_number > guess:\n guess = int(input(\"Try again. Guess a higher number: \"))\n tries += 1\n elif magic_number < guess:\n guess = int(input(\"Try again. Guess a lower number: \"))\n tries += 1\n else:\n print(\"Congratulations. You guessed the correct number in {} tries.\".format(tries))\n break\n except:\n print(\"That's not a valid number.\")", "def manual_input(self):\r\n atom_list = []\r\n while len(atom_list) < 4:\r\n for event in pygame.event.get():\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n column = mouse_x // 70\r\n row = mouse_y // 70\r\n if 0 < column < 9 and 0 < row < 9:\r\n if (row, column) not in atom_list:\r\n atom_list.append((row, column))\r\n return atom_list", "def tinyMazeSearch(problem):\n\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n print \"Start's successors:\", problem.getSuccessors(problem.getStartState()) # delete this later, otherwise the start state\n # will count as expanded twice!\n print 'problem', problem\n\n\n from pac.game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [e, e, w, s, w, w, s, w]", "def results_of_guess(self):\r\n print(self.best_guess)\r\n print(self.chosen_letter)\r\n \r\n #self.best_guess = input(\"Enter word with correct letters and stars \" + \"as blank spaces.\")\r\n wrong_words = set()\r\n if self.chosen_letter in self.best_guess: # in case of success\r\n print(\"hit\")\r\n list_of_indices = [i for i, value in enumerate(self.best_guess) \r\n if value == self.chosen_letter]\r\n for word in self.valid_words:\r\n for index in list_of_indices:\r\n if word[index] != self.chosen_letter:\r\n wrong_words.add(word)\r\n elif word.count(self.chosen_letter) > len(list_of_indices):\r\n wrong_words.add(word)\r\n \r\n else: # in case of failure\r\n print(\"miss\")\r\n for word in self.valid_words:\r\n if self.chosen_letter in word:\r\n wrong_words.add(word)\r\n self.valid_words = self.valid_words.difference(wrong_words)", "def run_single_game(words_list):\r\n\r\n # Initialize parameters\r\n wrong_guess_count 
= 0\r\n wrong_guess_words = []\r\n already_chosed = []\r\n msg = \"\"\r\n\r\n # Get random name from the words list.\r\n random_word = hangman_helper.get_random_word(words_list)\r\n\r\n # Initialize the pattern\r\n pattern = len(random_word) * HIDDEN_SIGN\r\n\r\n # Print default message to user\r\n msg = hangman_helper.DEFAULT_MSG\r\n\r\n # the game wont stop until the pattern will be revealed or guess number\r\n # will cross the max errors available.\r\n while wrong_guess_count < hangman_helper.MAX_ERRORS and \\\r\n pattern != random_word:\r\n # display the current state in each iteration of the loop\r\n hangman_helper.display_state(pattern, wrong_guess_count,\r\n wrong_guess_words, msg)\r\n # Get input from user\r\n request = hangman_helper.get_input()\r\n\r\n # Check if the input is a guess\r\n if request[INPUT_TYPE] == hangman_helper.LETTER:\r\n\r\n # Check parameter validation\r\n if len(request[INPUT_VALUE]) != 1 or \\\r\n not request[INPUT_VALUE].islower():\r\n msg = hangman_helper.NON_VALID_MSG\r\n continue\r\n # Check if the letter already was chosen before.\r\n elif request[INPUT_VALUE] in already_chosed:\r\n msg = hangman_helper.ALREADY_CHOSEN_MSG + request[INPUT_VALUE]\r\n # If the guessed letter does exist in the word\r\n elif request[INPUT_VALUE] in random_word:\r\n # Updating the the word pattern accordingly\r\n pattern = update_word_pattern(random_word, pattern,\r\n request[INPUT_VALUE])\r\n msg = hangman_helper.DEFAULT_MSG\r\n already_chosed.append(request[INPUT_VALUE])\r\n else:\r\n wrong_guess_count += 1\r\n wrong_guess_words.append(request[INPUT_VALUE])\r\n msg = hangman_helper.DEFAULT_MSG\r\n already_chosed.append(request[INPUT_VALUE])\r\n\r\n elif request[INPUT_TYPE] == hangman_helper.HINT:\r\n # Call the filter words function\r\n sort = filter_words_list(words_list, pattern, wrong_guess_words)\r\n # Call the choose letter function\r\n chosen_letter = choose_letter(sort, pattern)\r\n # Initialize the msg variable\r\n msg = hangman_helper.HINT_MSG + chosen_letter\r\n\r\n # Initialise the display function in case winning\r\n if pattern == random_word:\r\n msg = hangman_helper.WIN_MSG\r\n # Initialise the display function in case of losing\r\n elif wrong_guess_count == hangman_helper.MAX_ERRORS:\r\n msg = hangman_helper.LOSS_MSG + random_word\r\n # Calling the display function\r\n hangman_helper.display_state(pattern, wrong_guess_count, wrong_guess_words,\r\n msg, ask_play=True)", "def handle_turn(player_):\n if player_ == computer:\n print('\\nNow ', player_ + \"'s turn.\")\n position = block_to_win()\n if position == -1:\n position = check_if_computer_can_win()\n if position == -1:\n position = randrange(0, 9)\n while board[position] not in ['_']:\n position = randrange(0, 9)\n board[position] = computer\n display_board()\n if player_ == player:\n print('\\nNow ', player_ + \"'s turn.\")\n position = int(input('Choose a position from 1-9 (available): '))\n while position not in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n position = int(input('Wrong input. Choose a position from 1-9: '))\n position = position - 1\n while board[position] not in ['_']:\n position = int(input('Position is already taken. 
Choose from available positions: '))\n position = position - 1\n board[position] = player\n display_board()", "def gen_placecells(self, min_spread=0.2):\r\n\r\n N = None\r\n num_tries = 1000 # a limit on the number of attempts to place a new placecell\r\n\r\n # assign random x,y locations to each neuron\r\n locations = [self.random_location()]\r\n while True:\r\n # generate a random new point\r\n new_loc = self.random_location()\r\n\r\n # check that the point isn't too close to previous points\r\n count = 0\r\n while min([self.calc_dist(new_loc, l) for l in locations]) < min_spread and count < num_tries:\r\n new_loc = self.random_location()\r\n count += 1\r\n\r\n # add the new point\r\n locations += [new_loc]\r\n\r\n if (N == None and count >= num_tries) or len(locations) == N:\r\n # stop when required number of place cells built (if N specified),\r\n # or when world has been decently filled\r\n break\r\n\r\n return locations", "def ai_1(board: BoardState) -> BoardState:\n cur_piece = board.cpiece\n if cur_piece is not None:\n for (x,y) in board.open_spots:\n move = find_win_spot(cur_piece, board)\n if move:\n return update_board_then_give_random(board, move)\n board.ai_random_move()\n return board", "def match(self, atomsLists):\n errors = []\n allAtoms = concatenate(atomsLists) \n forbiddenPosList = []\n forbiddenAtomsList = [] #we'll need both of this to know which atom is banned\n posCount = len(atomsLists[0])\n match = [0 for _ in range(posCount)]\n referenceMatch = [0 for _ in range(posCount)]\n self._initPosSearch(len(allAtoms))\n \n for _ in range(posCount):\n \n atomIndex, placeIndex = self.matchPos(allAtoms, len(atomsLists)\n , forbiddenPosList\n , forbiddenAtomsList)\n if not placeIndex is None: \n forbiddenPosList.append(placeIndex)\n forbiddenAtomsList += [(atomIndex + n * len(atomsLists[0])) % len(allAtoms) \n for n in range(len(atomsLists))]\n if not placeIndex is None:\n pos = self.lattice.positions[placeIndex] \n errors.append(distance(pos.x0, allAtoms[atomIndex].x0)) \n if not placeIndex is None:\n match[atomIndex % len(atomsLists[0])] = placeIndex\n referenceMatch[atomIndex % len(atomsLists[0])] = atomIndex\n return match, referenceMatch, errors", "def main():\n board_state = [['_', '_', '_'],\n ['_', '_', '_'],\n ['_', '_', '_']]\n\n player_turn = int(input(\"Who goes first - select AI(0) or Human(1)? \").strip())\n human_marker = input(\"Select marker - 'X' or 'O'? 
\").strip()\n \n play(board_state, player_turn, human_marker, 0)", "def checkMove(guess, xPos, yPos):\n\n\t# Return 0 if x position or y position are not valid\n\tif(xPos not in range(0, 5) or yPos not in range(0, 5)):\n\t\treturn 0\n\n\t# Return 0 f the guessed position is not water\n\tif(guess[yPos][xPos] != \"~\"):\n\t\treturn 0\n\n\treturn 1", "async def localhangman(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.localhangman', extra={'invoker': ctx.message.author.name})\r\n if ctx.channel in self.channels_occupied:\r\n await ctx.send('There is already a game going on in this channel!')\r\n self.channels_occupied.add(ctx.channel)\r\n await ctx.send('Awaiting DM with word...')\r\n msg = await ctx.bot.wait_for('message',\r\n check=lambda m: isinstance(m.channel, d.DMChannel) and m.author == ctx.author)\r\n WORD = msg.content.lower()\r\n letters = ['_'] * len(WORD)\r\n lowers = (\r\n 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',\r\n 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',\r\n 'w', 'x', 'y', 'z'\r\n )\r\n for i in range(len(WORD)):\r\n if WORD[i] not in lowers:\r\n letters[i] = WORD[i]\r\n missed = []\r\n shanpe = 0\r\n status = await ctx.send(DGHANGMANSHANPES[shanpe] + '\\nMissed: ' + ', '.join(missed) + '\\nGotten: `' + \"\".join(letters) + '`')\r\n while \"\".join(letters) != WORD and shanpe < len(DGHANGMANSHANPES) - 1:\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and m.content in lowers)\r\n letter = guess.content\r\n await guess.delete()\r\n if WORD.find(letter) != -1:\r\n for i in self.substrs(letter, WORD):\r\n letters[i] = letter\r\n else:\r\n if letter not in missed:\r\n missed.append(letter)\r\n shanpe += 1\r\n await status.edit(content=(DGHANGMANSHANPES[shanpe] + '\\nMissed: ' + ', '.join(missed) + '\\nGotten: `' + \"\".join(letters) + '`'))\r\n if \"\".join(letters) == WORD:\r\n await ctx.send('Congratulations! You have guessed the complete word!')\r\n else:\r\n await ctx.send('You lost! 
The word was \\\"{}\\\".'.format(WORD))\r\n self.channels_occupied.remove(ctx.channel)", "def run_game(player_board, user_guess, computer_board, computer_guess):\n player_turn = 0 # Ensures player goes first\n computer_turn = 1 # Computer can only go once player score is equal\n # Life counter decrements each time a ship is hit\n player_lives = 15\n computer_lives = 15\n while True:\n if player_turn < computer_turn:\n user_guess.print_board()\n column, row = player_board.attack_input()\n if user_guess.board[row][column] == GUESSED:\n print('\\nYOU HAVE ALREADY GUESSED THIS CO-ORDINATE\\n')\n elif user_guess.board[row][column] == HITSHIP:\n print('\\nYOU HAVE ALREADY HIT A SHIP IN THIS CO-ORDINATE\\n')\n elif computer_board.board[row][column] == SHIP:\n print(' ')\n print(PHASE)\n print('\\nCONGRATULATIONS, YOU HIT A SHIP!\\n')\n user_guess.board[row][column] = HITSHIP\n player_turn += 1\n user_guess.lives_counter()\n user_guess.print_board()\n computer_lives -= 1\n print(\"COMPUTER'S TURN TO ATTACK!\")\n time.sleep(3)\n if computer_lives == 0:\n print('\\nTHE COMPUTER HAS NO LIVES LEFT!')\n print('YOU WIN!')\n print(' ')\n print(PHASE)\n break\n else:\n print(' ')\n print(PHASE)\n print('\\nYOU MISSED!\\n')\n user_guess.board[row][column] = GUESSED\n player_turn += 1\n user_guess.print_board()\n print(\"COMPUTER'S TURN TO ATTACK!\")\n time.sleep(3)\n if computer_turn == player_turn:\n row, column = computer_guess.attack_input()\n if computer_guess.board[row][column] == GUESSED:\n pass\n elif computer_guess.board[row][column] == HITSHIP:\n pass\n elif player_board.board[row][column] == SHIP:\n print('THE COMPUTER HIT YOUR SHIP!\\n')\n computer_turn += 1\n player_lives -= 1\n computer_guess.column_arry.append(column)\n computer_guess.row_arry.append(row)\n computer_guess.board[row][column] = HITSHIP\n player_board.board[row][column] = HITSHIP\n player_board.lives_counter()\n player_board.print_board()\n computer_guess.attk_arry.append(0)\n time.sleep(3)\n if player_lives == 0:\n print('\\nYOU HAVE NO LIVES LEFT!')\n print('YOU LOSE!')\n print(' ')\n print(PHASE)\n break\n else:\n print('COMPUTER MISSED!\\n')\n computer_guess.board[row][column] = GUESSED\n computer_turn += 1\n player_board.print_board()\n computer_guess.attk_arry.append(1)\n computer_guess.check_miss_count()\n time.sleep(3)", "def play_game():\n board = create_board()\n while True:\n for player in [1, 2]:\n random_place(board, player)\n result = evaluate(board)\n if result != 0:\n return result", "def test_med_bot_map1(self):\n game = self.bot_test_map1(Difficulty.med)\n self.assertEqual(game.first_player.ask_for_move(), (0, 11))", "def play(self):\n\n value = 0 #player dictionary key\n player = {0: 'O', 1: 'X'}\n\n moveCount = 0 #how many moves have occurred. also doubles as the self.order index.\n turn = \"\"\n while moveCount < self.n**2 and self.go == \"Tie\":\n value = not value\n turn = player[value] #X starts\n key = self.order[moveCount]\n i = key[0]\n j = key[1]\n\n\n# self.rows[i][0] == homogenous?\n# self.rows[i][1] == X/O?\n# self.rows[i][2] == count of X's/O's?\n\n# Check to see if row i is 'homogenous' (contains only X's or O's):\n if self.rows[i][0]:\n\n# Check to see if any square in row i has been played. 
If it has been played,\n# check to see if it was the same person who's current turn it is.\n if self.rows[i][1] == \"\" or player[value] == self.rows[i][1]:\n\n# Mark the column with the current person's token (X or O).\n# Admittedly, this could be improved to not update every time.\n self.rows[i][1] = turn\n\n# Update the count by one.\n self.rows[i][2] += 1\n\n# If the count is equal to the board size, end the game and return who won and how.\n if self.rows[i][2] == self.n:\n self.go = (turn, 'row ' + str(i))\n\n# If the current person who's turn it is,\n# is not the same as the previous player who played this row,\n# set this row's 'homogenous' attribute to false.\n else:\n self.rows[i][0] = False\n\n if self.cols[j][0]:\n if self.cols[j][1] == \"\" or player[value] == self.cols[j][1]:\n self.cols[j][1] = turn\n self.cols[j][2] += 1\n if self.cols[j][2] == self.n:\n self.go = (turn, 'column ' + str(j))\n else:\n self.cols[j][0] = False\n\n# On boards of odd-sized 'n' (n = 3,5,7,etc...)\n# the middle square is part of both diagonals: 'step' and 'same':\n if i == j:\n if self.diags['same'][0]:\n if self.diags['same'][1] == \"\" or player[value] == self.diags['same'][1]:\n self.diags['same'][1] = turn\n self.diags['same'][2] += 1\n if self.diags['same'][2] == self.n:\n self.go = (turn, 'diagonal from 0,0 to n-1,n-1')\n else:\n self.diags['same'][0] = False\n\n if i + j + 1 == self.n:\n if self.diags['step'][0]:\n if self.diags['step'][1] == \"\" or player[value] == self.diags['step'][1]:\n self.diags['step'][1] = turn\n self.diags['step'][2] += 1\n if self.diags['step'][2] == self.n:\n self.go = (turn, 'diagonal from n-1,0 to 0,n-1')\n else:\n self.diags['step'][0] = False\n\n moveCount += 1\n print(turn, key)\n else:\n return self.go", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]" ]
[ "0.71533525", "0.67306685", "0.5900517", "0.58870995", "0.5859277", "0.5668452", "0.54721373", "0.5391912", "0.5355076", "0.5267006", "0.5263563", "0.5250836", "0.5222762", "0.52184594", "0.5217904", "0.51913744", "0.5180138", "0.51650697", "0.51289845", "0.5128813", "0.5125373", "0.5121981", "0.5111606", "0.5105289", "0.51018906", "0.50980693", "0.5093615", "0.50856686", "0.5067107", "0.5060177", "0.50528365", "0.50504595", "0.5039649", "0.50393796", "0.50362813", "0.50285345", "0.5023247", "0.5021923", "0.50196016", "0.50189453", "0.50120556", "0.5008478", "0.5004361", "0.500021", "0.49953046", "0.49945906", "0.49906912", "0.49811915", "0.4974783", "0.49620086", "0.4959036", "0.4951893", "0.49507964", "0.49310425", "0.49240783", "0.4915001", "0.49129763", "0.49008548", "0.48919958", "0.489004", "0.48882782", "0.48860928", "0.48792204", "0.48724347", "0.4870941", "0.48704946", "0.48693132", "0.48683086", "0.4864997", "0.48638973", "0.48602128", "0.4858537", "0.485634", "0.48502356", "0.48419252", "0.48342574", "0.48340967", "0.48226443", "0.48118582", "0.47941765", "0.479028", "0.4787714", "0.4787659", "0.47830456", "0.47715527", "0.47648817", "0.47640452", "0.4763622", "0.4763585", "0.47554025", "0.4753874", "0.47463554", "0.47411114", "0.47407314", "0.47407314", "0.47407314", "0.47407314", "0.47407314", "0.47407314", "0.47407314" ]
0.6491031
2
atoms_left returns the number of unguessed atoms still left
def atoms_left(self): return len(self._atoms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def atoms_left(self):\n return self._atoms", "def atoms_left(self):\r\n return self._board.get_atoms()", "def num_pieces_left(self):\n return self.num_white_pieces + self.num_black_pieces", "def count_mass_left(self):\n self.mass_left_count = int(np.sum(self.array))", "def number_of_their_pieces_to_left(column):\n row = __get_top_of_stack(column)\n return number_pieces_of_type_in_direction(column, row, THEM, 'left')", "def count_left_players(definition):\n return int(parse_player_definition(definition)[1]['left_players'])", "def n_atoms(self) -> int:\n return 0 if self.atoms is None else len(self.atoms)", "def natoms(self):\n return len(self.atoms)", "def remaining(self):\n\t\tmines = sum(1 for _ in self.get_mines())\n\t\tmarked = sum(1 for x in range(self.width)\n\t\t\t\t\t for y in range(self.height) if self.marks[x][y] == FLAG)\n\t\treturn mines - marked", "def get_num_atoms(self):\n\n return len(self.atoms)", "def n_charged_atoms(mol: Mol) -> int:\n return sum([at.GetFormalCharge() != 0 for at in mol.GetAtoms()])", "def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n", "def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n", "def _contributions_left(self):\r\n if self.is_complete:\r\n return 0, 0\r\n online_left = self.online_quota - self.stats.num_online_contributions\r\n if online_left < 0:\r\n online_left = 0\r\n tickets_left = self.num_tickets_total - self.stats.num_tickets_redeemed\r\n return (online_left, tickets_left)", "def counting_effused_rafts(prev_centers, prev_count, curr_centers, curr_count, boundary_x, max_displacement):\n effused_raft_to_left = 0\n effused_raft_to_right = 0\n cost_matrix = scipy_distance.cdist(prev_centers[:prev_count], curr_centers[:curr_count], 'euclidean')\n # note that row index refers to previous raft number, column index refers to current raft number\n\n # select the boundary crossing to be in the middle of the cropped image, so only deals with existing rafts\n for raftID in np.arange(prev_count):\n if np.any(cost_matrix[raftID, :] < max_displacement): # raft still exist\n curr_raft_id = np.nonzero(cost_matrix[raftID, :] < max_displacement)[0][\n 0] # [0][0] is to convert array into scalar\n if (prev_centers[raftID, 0] >= boundary_x) and (curr_centers[curr_raft_id, 0] < boundary_x):\n effused_raft_to_left = effused_raft_to_left + 1\n elif (prev_centers[raftID, 0] < boundary_x) and (curr_centers[curr_raft_id, 0] >= boundary_x):\n effused_raft_to_right = effused_raft_to_right + 1\n return effused_raft_to_left, effused_raft_to_right", "def number_of_my_pieces_to_left(column):\n row = __get_top_of_stack(column)\n return number_pieces_of_type_in_direction(column, row, ME, 'left')", "def num_tickets_left(self):\r\n return self._contributions_left[1]", "def number_of_atoms(self):\n if self._number_of_atoms is None:\n if self.mol is not None:\n self._number_of_atoms = len(self.mol.atoms)\n elif not self.is_ts:\n self._number_of_atoms = len(self.get_xyz().splitlines())\n return self._number_of_atoms", "def count_all_atoms(self):\n n = 0\n for atm in self.atom_order_list:\n if isinstance(atm, Atom):\n n += 1\n else:\n n += len(atm)\n return n", "def numAtoms(self):\n\n\t\tnatoms = 0\n\t\tfor chain in self.chain:\n\t\t\tfor residue in chain.residue:\n\t\t\t\tnatoms += residue.numAtoms()\n\n\t\treturn natoms", "def edges_left(self):\n return self._edges_left", "def num_online_left(self):\r\n return self._contributions_left[0]", "def 
count_all_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_all_atoms()\n return n", "def n_atoms(self):\n return self._n_atoms", "def left(self, obs, object):\n for i in range(int((self.no_rays-1)/2)):\n if(obs[self.listOfObjects.index(object)][i] > 0):\n # print(\"found \" + str(object) + \" left\")\n return True\n return False", "def misplaced_nodes(puzzle):\n\n\tcount = 0\n\tfor i in range(puzzle.dimension):\n\t\tfor j in range(puzzle.dimension):\n\t\t\tif (puzzle.board[i][j] != puzzle.final_state[i][j] and puzzle.board[i][j] != 0): count += 1\n\n\treturn count", "def numAtoms(self):\n return self.nAtoms", "def has_left(self):\n return self.l is not None", "def __len__(self):\n return len(self.qc_mol.atoms) + len(self.br_mol.atoms) + len(self.pc_mol.atoms)", "def nAtoms(self):\n if self._c_result is not NULL:\n return self._c_result.n_atoms\n return 0", "def number_of_atoms_within_radius(self, distance_cutoff):\n n_atoms = 0\n atom_ids = []\n for contact in self.nearby_atoms:\n other_id = contact.atom_id_no_altloc()\n if (not other_id in atom_ids):\n if (contact.distance() < distance_cutoff):\n n_atoms += 1\n atom_ids.append(other_id) # check for alt confs.\n return n_atoms", "def total_num_atoms(self):\n return self.GetNumberOfAtoms()", "def _has_left(self, j):\n return (2 * j + 1) < len(self)", "def _has_left(self, j):\n return self._left(j) < len(self._data)", "def left(self):\n self.counterUp(teamNumber = 1)", "def getExceedingBoxLeft(self):\n return self.exceedingBoxLeft", "def lefts(self):\n lstack = [len(self.arr)]\n i = len(self.arr) - 1\n while i >= 0:\n if self.arr[i] > self.arr[lstack[-1] - 1]:\n while lstack and self.arr[i] > self.arr[lstack[-1] - 1]:\n x = lstack.pop()\n self.left[x - 1] = i + 1\n lstack.append(i + 1)\n i -= 1", "def _get_left_height(self):\n if self.left is None:\n return 0\n return 1 + max(\n self.left._get_left_height(),\n self.left._get_right_height(),\n )", "def strikesLeft(self):\n return self.strikes > 0", "def size(self):\r\n return len(atoms)", "def badExitPrevMolecule(self):\n if self.molecules > 0:\n # collect list of any atoms where num departed is not expected num per molecule\n departErrors = [(atom.name, count) for atom, count in self.departed.items() if self.departed[atom] != atom.value]\n if len(departErrors) > 0:\n print(\"too many or too few atoms exited between previous and this molecule creations.\")\n print( \"Exit counts:\", departErrors)\n return False\n return True", "def nremaining(self) -> int:\n return self._nmines - self._nfound", "def migrations_left(self):\n return self._migrations_left", "def atom_count(self):\n return len(self.repeated_elements())", "def numAtoms(self, flag=None):\n\n return len(self._getSubset(flag)) if flag else self._n_atoms", "def has_left(self):\n return self.left != None", "def hintsLeft(self):\n return self.hints > 0", "def is_left(self):\n if self.pupils_located:\n return self.horizontal_ratio() >= 0.65", "def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)", "def has_left(self):\n return self.__left != None", "def lackingAtomsForMolecule(self):\n createErrors = [(atom.name, count) for atom, count in self.available.items() if self.available[atom] < atom.value]\n if len(createErrors) > 0:\n print(\"too few atoms produced to create a new molecule\")\n print(\"Too low atom counts:\", createErrors)\n return False\n return True", "def getNumCleanedTiles(self):\n\t\tr = 0\n\t\tfor i in self.tiles:\n\t\t\tif 
i.isClean(): r += 1\n\t\treturn r", "def movesLeft(self):\n \n return \" \" in self.squares", "def __len__(self):\n return self.rdmol.GetNumAtoms()", "def isLeft(self):\n return self.left", "def get_num_ring_atoms(input_mol):\n num_ring_atoms = 0\n split_index = 0\n split_indices = []\n for atom in input_mol.GetAtoms():\n if atom.IsInRing():\n num_ring_atoms += 1\n else:\n split_indices.append(split_index)\n split_index += 1\n return num_ring_atoms, split_indices", "def count_dead_node(self):\n count = 0\n for node in self.node:\n if node.energy < 0:\n count += 1\n return count", "def __len__(self):\n \n return len(self.num_atoms)", "def _has_left(self, index):\r\n return self._left(index) < len(self)", "def exitPreMolecule(self):\n if self.molecules == 0 and max(self.departed.values()) > 0:\n print(\"these atoms exited before first molecule created: \", end=\"\")\n print([atom.name for atom,count in self.departed.items() if count>0])\n return False\n return True", "def getNumAtoms(self):\n return int(self._getPointerValue('NATOM'))", "def get_number_of_atoms(self):\n natoms = 0\n\n with open(self.path, 'r') as f:\n line = f.readline()\n while line != '' and natoms == 0:\n # Automatically determine the number of atoms\n if 'CARTESIAN COORDINATES (ANGSTROEM)' in line and natoms == 0:\n for i in range(2):\n line = f.readline()\n\n while '---------------------------------' not in line:\n natoms += 1\n line = f.readline()\n if not line.strip():\n f.close()\n return natoms\n line = f.readline()", "def is_left_unbounded(self):\n return self.left is S.NegativeInfinity or self.left == Float(\"-inf\")", "def number_of_new_components(self):\n t_low = self.lower_binary_tree().to_tilting()\n t_up = self.upper_binary_tree().to_tilting()\n return len([p for p in t_low if p in t_up])", "def left_distance(self):\n return self.x", "def findLeftContext(tree, start, ignore):\t\n nrOfClosingBrs = 0\n nrOfOpeningBrs = 0\n firstPass = True\n for currentIndex in range(start-1,-1,-1):\n if tree[currentIndex].symbol in ignore:\n continue\n elif tree[currentIndex].symbol == \"[\":\n if not firstPass:\n nrOfOpeningBrs = nrOfOpeningBrs + 1\n elif tree[currentIndex].symbol == \"]\":\n nrOfClosingBrs = nrOfClosingBrs + 1\n elif nrOfClosingBrs == nrOfOpeningBrs:\n return(tree[currentIndex])\n firstPass = False\n return(emptyModule())", "def number_of_heavy_atoms(self):\n if self._number_of_heavy_atoms is None:\n if self.mol is not None:\n self._number_of_heavy_atoms = len([atom for atom in self.mol.atoms if atom.isNonHydrogen()])\n elif self.final_xyz is not None or self.initial_xyz is not None:\n self._number_of_heavy_atoms = len([line for line in self.get_xyz().splitlines()\n if line.split()[0] != 'H'])\n elif self.is_ts:\n for ts_guess in self.ts_guesses:\n if ts_guess.xyz is not None:\n self._number_of_heavy_atoms =\\\n len([line for line in ts_guess.xyz.splitlines() if line.split()[0] != 'H'])\n return self._number_of_heavy_atoms", "def count_all_atoms(self):\n n = 0\n for model in self.iter_models():\n n += model.count_all_atoms()\n return n", "def num_atoms(self):\n return self.h5['{}/{}'.format(SETTINGS, N_ATOMS)][()]", "def __len__(self):\n return len(self.atom_rings)", "def left(cls):\n student_count = func.count(Student.id).label('student_count')\n recent_years = [sy.id for sy in SchoolYear.recent()]\n stmt = Session.query(student_count, Student).\\\n join(GroupMembership).\\\n join(Group).group_by(Group.id).\\\n filter(Group.year_id.in_(recent_years)).subquery()\n max = 
Session.query(func.max(stmt.c.student_count)).first()[0]\n count = Session.query(func.count(LuckyNumber.id)).first()[0]\n past = Session.query(LuckyNumber.number).\\\n order_by(desc(LuckyNumber.date)).\\\n limit(count % max).all()\n\n all = set(range(1, max + 1))\n past = set(x[0] for x in past)\n left = list(all.difference(past))\n left.sort()\n return left", "def getNumCleanedTiles(self):\n counter = 0\n for tile in self.tiles:\n if self.tiles[tile] == 'clean':\n counter += 1\n return counter", "def num_online_left_for_user(self, user):\r\n q = self.contribution_set.filter(contributor=user)\r\n num_user_contribs = sum([c.qty for c in q])\r\n remaining = self.max_contributions_per_person - num_user_contribs\r\n if remaining < 0:\r\n remaining = 0\r\n left = min(remaining, self.num_online_left)\r\n return left", "def test_left(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.leave(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"left_members\"] - r1stats_ante[\"left_members\"], +1\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], -1\n )", "def n_aromatic_atoms(mol: Mol) -> int:\n matches = mol.GetSubstructMatches(_AROMATIC_QUERY)\n return len(matches)", "def _left(node):\n return 2 * node + 1", "def malus_roles_left(players_left, roles_left):\n\n # Permutations of the players still to be deployed. We do that because\n # we only want that combination of players in which ALL of them are\n # deployed\n players_perm = permutations(players_left, len(players_left))\n\n # Initialize the number of malus (just a number high enough)\n fin_malus = 10\n\n # For each permutation of players to be deployed\n for perm in players_perm:\n\n # Initialize two parameters: a counter and the number of malus for\n # this specific permutation. 
Counter is used to be sure all the\n # players in the permutation are checked\n count = 0\n temp_malus = 0\n\n # Make a copy of the roles to be covered so we can use it later to\n # delete roles that we are able to cover\n copy_of_adapted_roles = copy.copy(roles_left)\n\n # For each element in the permutation we select the corresponding\n # role and try to cover it\n for i in range(len(perm)):\n role_to_cover = roles_left[i]\n role_cand = perm[i][2]\n\n # If it is possible to cover it with a malus we increase the\n # number of malus and the counter and then remove the role from\n # the list of the roles still uncovered\n if role_to_cover in malus_roles[role_cand]:\n temp_malus += 1\n count += 1\n copy_of_adapted_roles.remove(role_to_cover)\n\n # If it is possible to cover it with no malus we just increase\n # the counter and delete the role\n elif (role_to_cover not in malus_roles[role_cand]\n and role_to_cover in compatible_roles[role_cand]):\n count += 1\n copy_of_adapted_roles.remove(role_to_cover)\n\n # Else we interrupt checking this permutation and go to the\n # one\n else:\n break\n\n # If we checked ALL the elements in the permutation and the number\n # of malus is lower than the previous value we store it\n if count == len(perm) and temp_malus < fin_malus:\n fin_malus = temp_malus\n\n # If this value is different from the default one it means we found a\n # solution and we return it\n if fin_malus != 10:\n return fin_malus\n else:\n return False", "def getCargoSpaceLeft(self):\n spaceused = self.cargo[\"wood\"] + self.cargo[\"coal\"] + self.cargo[\"uranium\"]\n if self.type == UNIT_TYPES.WORKER:\n return GAME_CONSTANTS[\"PARAMETERS\"][\"RESOURCE_CAPACITY\"][\"WORKER\"] - spaceused\n else:\n return GAME_CONSTANTS[\"PARAMETERS\"][\"RESOURCE_CAPACITY\"][\"CART\"] - spaceused", "def left(self):\n\t\treturn self._left", "def allPreExited(self, curAtom):\n if self.departed[curAtom] == curAtom.value:\n print(\"No %s atoms left in molecule to exit\" % curAtom.name)\n return False\n return True", "def hasLeftSon(self):\n \n return self._leftSon is not None", "def _children_count(self):\n cnt = 0\n if self.left:\n cnt += 1\n if self.right:\n cnt += 1\n return cnt", "def dots_left(self):\n return (len(self.top_row) +\n len(self.bottom_row) +\n len(self.left_col) +\n len(self.right_col))", "def left_height(self):\n if not self.left:\n return -1\n return self.left.height", "def is_left_coset(self):\n return str(self._dir) == '-'", "def ht(node):\n n = 0\n while node: n, node = n+1, node.left\n return n", "def increase_left_boundary(self):\n self.L = self.L - 1.0\n self.Ne = self.Ne + 1", "def heavy_atoms(row):\n m = Chem.MolFromSmiles(row.SMILES)\n heavy_atom_count = Descriptors.HeavyAtomCount(m)\n\n return heavy_atom_count", "def timesLeft(self)->int:\n return self.maxTimes - self.timesUsed", "def number_of_favored_ligand_residues(self, ion_params, distance = 3.0,\n exclude_atoms = ()):\n n_res = 0\n resids = []\n for contact in self.nearby_atoms:\n if (contact.atom_name() in exclude_atoms):\n continue\n if (contact.distance() < distance):\n labels = contact.atom.fetch_labels()\n other_resname = contact.resname()\n other_resid = labels.chain_id + labels.resid()\n if ((ion_params.allowed_coordinating_residues is not None) and\n (other_resname in ion_params.allowed_coordinating_residues) and\n (not other_resid in resids)):\n n_res += 1\n resids.append(other_resid)\n return n_res", "def getNumCleanedTiles(self):\n return len(self.CleanBlocks)", "def essential_node_count(self) -> int:\n return 
sum(\n 1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS\n )", "def __len__(self):\n\t\treturn len(self._idle) + len(self._running)", "def bcLeft(self):\n return self.__bcLeft", "def bcLeft(self):\n return self.__bcLeft", "def left(self):\n n = len(self)-1\n fl = np.zeros(n+1)\n for ii in range(n+1):\n for jj in range(n+1-ii):\n for kk in range(n+1-ii-jj):\n fl[ii+jj] += self[ii,jj,kk]\n return fl", "def h(self, node):\n count_peg = -1\n for line in node.state.board:\n count_peg += line.count(c_peg())\n return count_peg", "def __remove_01_connected_juncs(self) -> int:\n total_removed = 0\n while True:\n removed = 0\n for junc in self.get_all_juncs():\n if junc.connections_count() <= 1:\n self.remove_junction(junc)\n removed += 1\n if removed == 0:\n break\n total_removed += removed\n return total_removed", "def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)", "def left(self) -> float:\n points = self.get_adjusted_points()\n x_points = [point[0] for point in points]\n return min(x_points)" ]
[ "0.7521378", "0.74469197", "0.6469958", "0.6355561", "0.6071514", "0.6057757", "0.6004638", "0.59833574", "0.59616107", "0.5921324", "0.5888561", "0.5864343", "0.5864343", "0.5818085", "0.57945406", "0.57681686", "0.5768063", "0.5694106", "0.56850934", "0.56428653", "0.55966717", "0.5595428", "0.5573298", "0.5572095", "0.5534848", "0.5533742", "0.55281496", "0.5520084", "0.55118674", "0.55111206", "0.5477485", "0.5464806", "0.54611015", "0.5455408", "0.54353034", "0.5422384", "0.54176265", "0.5388858", "0.53875977", "0.5384478", "0.5344341", "0.5344194", "0.5340228", "0.53288186", "0.53287715", "0.53258264", "0.5321895", "0.5297159", "0.5287971", "0.5258961", "0.525099", "0.52504104", "0.52232176", "0.5222938", "0.5210906", "0.5199556", "0.5192024", "0.5189443", "0.5175482", "0.5172869", "0.5172195", "0.51690114", "0.5168419", "0.51651156", "0.51501566", "0.5133133", "0.5126033", "0.512042", "0.51188314", "0.51151747", "0.5113523", "0.5065914", "0.50638133", "0.5046553", "0.50413245", "0.5036473", "0.50218886", "0.50205624", "0.5011483", "0.50101775", "0.5008495", "0.500706", "0.4998551", "0.49985355", "0.49926823", "0.49909064", "0.49804693", "0.4978391", "0.4978351", "0.49686262", "0.49641687", "0.49570945", "0.49383727", "0.49353406", "0.49353406", "0.49121994", "0.4906311", "0.48988152", "0.48978758", "0.48977053" ]
0.84319353
0
Test LSTM, LayerNormLSTM and NAS gnmt encoder. GNMT has only a single bi directional layer, and num_layers1 uni layers. time_major=True
def runLSTMEncoder(self, encoder, num_layers): inputs_ph = tf.placeholder( dtype=tf.float32, shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH)) inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None)) outputs, states = encoder.encode( mode=tf.estimator.ModeKeys.TRAIN, sequence_inputs=inputs_ph, sequence_length=inputs_length_ph) num_bi_layers = 1 num_uni_layers = num_layers - num_bi_layers if num_uni_layers == 1: # states is a tuple of (states_bi_bw, states_uni) # states_bi_bw is a tuple (states_bi_bw) # states_uni is a tuple of length num_uni_layers states_bi_bw, states_uni = states self.assertEqual(1, len(states_bi_bw)) self.assertEqual(num_uni_layers, len(states_uni)) # states_bi_bw[0] is a tuple of (states_c, states_h) self.assertEqual(2, len(states_bi_bw[0])) # convert states from tuple to tensor states_list = [states_bi_bw[0]] for i in range(num_uni_layers): states_list.append(states_uni[i]) states = tf.convert_to_tensor(states_list) else: # states is a tuple of (states_uni) of length num_uni_layers states_uni = states self.assertEqual(num_uni_layers, len(states_uni)) states_list = [] for i in range(num_uni_layers): states_list.append(states_uni[i]) states = tf.convert_to_tensor(states_list) inputs, inputs_length = common_utils.get_encoder_test_inputs() with self.test_session() as sess: sess.run(tf.global_variables_initializer()) outputs, states = sess.run( [outputs, states], feed_dict={ inputs_ph: inputs, inputs_length_ph: inputs_length }) self.assertAllEqual( [common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH], outputs.shape) if num_uni_layers == 1: self.assertEqual(num_layers, len(states)) # 2 in second dimension means states_c and states_h self.assertAllEqual( [num_layers, 2, common_utils.BATCH_SIZE, common_utils.DEPTH], states.shape) else: self.assertEqual(num_uni_layers, len(states)) self.assertAllEqual( [num_uni_layers, 2, common_utils.BATCH_SIZE, common_utils.DEPTH], states.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_model_with_lstm_layer_time_major_true(self):\n\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Defaults\n # return_state=False, unit_forget_bias=True\n # return_sequences=False, time_major=False\n x = tf.keras.layers.LSTM(12,\n time_major=True,\n name='lstm_tm')(inputs)\n\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./lstm_time_major_true', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n self.assertEqual(4, len(conn_graph.get_all_ops()))\n lstm_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'LSTM' and op.name == 'lstm_tm':\n self.assertEqual(op.pattern_type, 'LSTM_TimeMajor_True')\n lstm_detected = True\n inner_list = op.internal_ops\n self.assertEqual(85, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('lstm_tm/while/MatMul'))\n self.assertTrue(lstm_detected)\n self.validate_internal_structure_lstm(inner_list)", "def test_model_with_lstm_layer_deepspeech_time_major_true(self):\n\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Defaults\n # return_state=False, unit_forget_bias=True\n # return_sequences=False, time_major=False\n x = tf.keras.layers.LSTM(12,\n unit_forget_bias=False,\n time_major=True,\n return_sequences=True,\n name='lstm_stacked')(inputs)\n\n x2 = tf.keras.layers.LSTM(12, name='last_lstm')(x)\n\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x2)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./lstm_deepspeech', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n self.assertEqual(5, len(conn_graph.get_all_ops()))\n lstm_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'LSTM' and op.name == 'lstm_stacked':\n self.assertEqual(op.pattern_type, 'LSTM_Stacked_TimeMajor_True')\n lstm_detected = True\n inner_list = op.internal_ops\n self.assertEqual(84, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('lstm_stacked/while/MatMul'))\n self.assertTrue(lstm_detected)\n self.validate_internal_structure_lstm(inner_list)", "def test_model_with_lstm_layer_deepspeech_time_major_false(self):\n\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Defaults\n # return_state=False, unit_forget_bias=True\n # return_sequences=False, time_major=False\n # use both return state and return sequence\n x, state_h, state_c = tf.keras.layers.LSTM(12, return_state=True,\n return_sequences=True,\n name='lstm_stacked')(inputs)\n\n x2 = tf.keras.layers.LSTM(12, name='last_lstm')(x)\n\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x2)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./lstm_deepspeech', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n self.assertEqual(5, len(conn_graph.get_all_ops()))\n 
lstm_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'LSTM' and op.name == 'lstm_stacked':\n self.assertEqual(op.pattern_type, 'LSTM_Stacked')\n lstm_detected = True\n inner_list = op.internal_ops\n self.assertEqual(86, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('lstm_stacked/while/MatMul'))\n self.assertTrue(lstm_detected)\n self.validate_internal_structure_lstm(inner_list)", "def test_model_with_lstm_layer_deepspeech_time_major_true_sigmoid(self):\n\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Defaults\n # return_state=False, unit_forget_bias=True\n # return_sequences=False, time_major=False\n x = tf.keras.layers.LSTM(12,\n recurrent_activation='sigmoid',\n unit_forget_bias=False,\n time_major=True,\n return_sequences=True,\n name='lstm_stacked')(inputs)\n\n x2 = tf.keras.layers.LSTM(12, name='last_lstm')(x)\n\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x2)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./lstm_deepspeech', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n self.assertEqual(5, len(conn_graph.get_all_ops()))\n lstm_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'LSTM' and op.name == 'lstm_stacked':\n self.assertEqual(op.pattern_type, 'LSTM_Stacked_TimeMajor_True_Sigmoid')\n lstm_detected = True\n inner_list = op.internal_ops\n self.assertEqual(75, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('lstm_stacked/while/MatMul'))\n self.assertTrue(lstm_detected)", "def test_lstm_two_layers(self):\n\n class MultipleLayersLSTM(nn.Module):\n def __init__(self):\n super(MultipleLayersLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(10, 20, 2, bidirectional=False)\n self.rnn.training = False\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(5, 3, 10)\n h = torch.randn(2, 3, 20)\n c = torch.randn(2, 3, 20)\n model = MultipleLayersLSTM()\n\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def test_lstm_basic(self):\n\n class SimpleLSTM(nn.Module):\n def __init__(self):\n super(SimpleLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(12, 10, 1)\n w2 = torch.randn(40, 10)\n w1 = torch.randn(40, 12)\n b1 = torch.randn(40)\n b2 = torch.randn(40)\n self.rnn.training = False\n self.rnn.weight_ih_l0 = torch.nn.Parameter(w1)\n self.rnn.weight_hh_l0 = torch.nn.Parameter(w2)\n self.rnn.bias_ih_l0 = torch.nn.Parameter(b1)\n self.rnn.bias_hh_l0 = torch.nn.Parameter(b2)\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(10, 3, 12)\n h = torch.randn(1, 3, 10)\n c = torch.randn(1, 3, 10)\n model = SimpleLSTM()\n\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def test_model_with_basic_lstm_layer(self):\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Add an RNN layer with 12 internal units.\n x = tf.keras.layers.LSTM(12, name='lstm0')(inputs)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x)\n\n init = 
tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./lstm', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n # there should be only 4 connected graph ops, input, LSTM , Dense and Softmax\n self.assertEqual(4, len(conn_graph.get_all_ops()))\n lstm_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'LSTM':\n lstm_detected = True\n inner_list = op.internal_ops\n self.assertEqual(86, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('lstm0/while/MatMul'))\n self.assertEqual('lstm0', op.name)\n self.assertTrue(lstm_detected)\n\n valid_matmuls = []\n valid_muls = []\n valid_bias_add = []\n valid_activation = []\n for op in inner_list:\n if op.type == 'MatMul' and op not in valid_matmuls:\n valid_matmuls.append(op)\n if op.type == 'Mul' and op not in valid_matmuls:\n valid_muls.append(op)\n if op.type == 'BiasAdd' and op not in valid_bias_add:\n valid_bias_add.append(op)\n if op.type == 'Tanh' and op not in valid_activation:\n valid_activation.append(op)\n\n self.assertEqual(8, len(valid_matmuls))\n self.assertEqual(7, len(valid_muls))\n self.assertEqual(4, len(valid_bias_add))\n self.assertEqual(2, len(valid_activation))", "def main():\r\n datasetrootdir, resultrootdir, modelrootdir, normal, mutant, savebinary, train_params = io_utils.arg_parse(\r\n 'Test LSTM')\r\n tag = normal + '_vs_' + mutant\r\n\r\n normal_dir_name = os.path.join(datasetrootdir, normal)\r\n mutant_dir_name = os.path.join(datasetrootdir, mutant)\r\n\r\n try:\r\n # load data of normal worms\r\n normal_data, normalfile = io_utils.get_data(os.path.join(normal_dir_name, const.featuredir))\r\n # normal_data = io_utils.normalize_list(normal_data, bias=0.1)\r\n # load data of mutant worms\r\n mutant_data, mutantfile = io_utils.get_data(os.path.join(mutant_dir_name, const.featuredir))\r\n # mutant_data = io_utils.normalize_list(mutant_data, bias=0.1)\r\n\r\n normal_data, mutant_data = io_utils.normalize_list(normal_data, mutant_data, bias=0.1)\r\n print('data loaded')\r\n\r\n maxlen = io_utils.get_max_length(normal_data, mutant_data)\r\n print('maxlen: ' + str(maxlen))\r\n split_percentage = 4.0 / 5.0\r\n batch_size = 64\r\n\r\n X_normal_train, X_normal_test = io_utils.splitData_by_random(normal_data, split_percentage)\r\n X_mutant_train, X_mutant_test = io_utils.splitData_by_random(mutant_data, split_percentage)\r\n\r\n F_normal_train, F_normal_test = io_utils.splitData_by_random(normalfile, split_percentage)\r\n F_mutant_train, F_mutant_test = io_utils.splitData_by_random(mutantfile, split_percentage)\r\n\r\n # transform the list to same sequence length\r\n X_normal_test = sequence.pad_sequences(X_normal_test, maxlen=maxlen, dtype='float64', padding='post',\r\n truncating='post')\r\n X_mutant_test = sequence.pad_sequences(X_mutant_test, maxlen=maxlen, dtype='float64', padding='post',\r\n truncating='post')\r\n\r\n # load model\r\n if os.path.exists(lstm.model_path(modelrootdir, tag=tag)):\r\n print('loading model...')\r\n model = load_model(lstm.model_path(modelrootdir, tag=tag))\r\n model.summary()\r\n else:\r\n print('model ' + lstm.model_path(modelrootdir, tag=tag) + ' not found')\r\n return\r\n\r\n test(model, X_normal_test, X_mutant_test, batch_size=batch_size, normal=normal, mutant=mutant,\r\n F_normal_test=F_normal_test, F_mutant_test=F_mutant_test, savedir=os.path.join(resultrootdir, tag))\r\n\r\n # diff(model, normal_data, 
mutant_data, batch_size, normalfile, mutantfile,\r\n # os.path.join(resultrootdir,tag,normal), os.path.join(resultrootdir,tag,mutant),maxlen)\r\n\r\n # get output of intermediate layer\r\n normal_data = sequence.pad_sequences(normal_data, maxlen=maxlen, dtype='float64', truncating='post',\r\n padding='post')\r\n mutant_data = sequence.pad_sequences(mutant_data, maxlen=maxlen, dtype='float64', truncating='post',\r\n padding='post')\r\n \r\n write_intermediate_output(model, normal_data, mutant_data, batch_size, normalfile, mutantfile,\r\n os.path.join(resultrootdir, tag, normal), os.path.join(resultrootdir, tag, mutant),\r\n timesteps=maxlen, savebinary=savebinary)\r\n\r\n except:\r\n traceback_error = traceback.format_exc()\r\n print('traceback:' + str(traceback_error))\r\n print('[fail]')\r\n sys.exit(1)\r\n\r\n print('[success]')", "def set_model(self, n_lstm_layers=3, n_lstm_nodes=150, n_dense_1=1, n_nodes_dense_1=300, n_dense_2=4, n_nodes_dense_2=200, dropout_rate=0.1, learning_rate=0.001, batch_norm=True, batch_momentum=0.99):\n\n input_objects = keras.layers.Input(shape=(len(self.low_level_vars), len(self.low_level_vars[0])), name='input_objects') \n input_global = keras.layers.Input(shape=(len(self.high_level_vars),), name='input_global')\n lstm = input_objects\n decay = 0.2\n for i_layer in range(n_lstm_layers):\n #lstm = keras.layers.LSTM(n_lstm_nodes, activation='tanh', kernel_regularizer=keras.regularizers.l2(decay), recurrent_regularizer=keras.regularizers.l2(decay), bias_regularizer=keras.regularizers.l2(decay), return_sequences=(i_layer!=(n_lstm_layers-1)), name='lstm_{}'.format(i_layer))(lstm)\n lstm = keras.layers.LSTM(n_lstm_nodes, activation='tanh', return_sequences=(i_layer!=(n_lstm_layers-1)), name='lstm_{}'.format(i_layer))(lstm)\n\n #inputs to dense layers are output of lstm and global-event variables. 
Also batch norm the FC layers\n dense = keras.layers.concatenate([input_global, lstm])\n for i in range(n_dense_1):\n dense = keras.layers.Dense(n_nodes_dense_1, activation='relu', kernel_initializer='he_uniform', name = 'dense1_%d' % i)(dense)\n if batch_norm:\n dense = keras.layers.BatchNormalization(name = 'dense_batch_norm1_%d' % i)(dense)\n dense = keras.layers.Dropout(rate = dropout_rate, name = 'dense_dropout1_%d' % i)(dense)\n\n for i in range(n_dense_2):\n dense = keras.layers.Dense(n_nodes_dense_2, activation='relu', kernel_initializer='he_uniform', name = 'dense2_%d' % i)(dense)\n #add droput and norm if not on last layer\n if batch_norm and i < (n_dense_2 - 1):\n dense = keras.layers.BatchNormalization(name = 'dense_batch_norm2_%d' % i)(dense) \n if i < (n_dense_2 - 1):\n dense = keras.layers.Dropout(rate = dropout_rate, name = 'dense_dropout2_%d' % i)(dense)\n\n output = keras.layers.Dense(1, activation = 'sigmoid', name = 'output')(dense)\n #optimiser = keras.optimizers.Nadam(lr = learning_rate)\n optimiser = keras.optimizers.Adam(lr = learning_rate)\n\n model = keras.models.Model(inputs = [input_global, input_objects], outputs = [output])\n model.compile(optimizer = optimiser, loss = 'binary_crossentropy')\n self.model = model", "def build_test_network(self):\n # Inputs\n vid_input = tf.placeholder(tf.float32, [None, self.num_frame, self.feat_size])\n batch_size = tf.shape(vid_input)[0]\n \n # State variables\n v_LSTM_states = (tf.zeros((batch_size, self.v_LSTM_cell.state_size[0])),\n tf.zeros((batch_size, self.v_LSTM_cell.state_size[1])))\n t_LSTM_states = (tf.zeros((batch_size, self.t_LSTM_cell.state_size[0])),\n tf.zeros((batch_size, self.t_LSTM_cell.state_size[1])))\n padding = tf.zeros([batch_size, self.state_size])\n\n outputs = [] \n loss = 0.0\n # Encoder network\n # vid_input_list = tf.split(vid_input, self.num_frame, 1)\n with tf.variable_scope(tf.get_variable_scope()):\n for idx in range(self.num_frame):\n if idx > 0:\n tf.get_variable_scope().reuse_variables()\n with tf.variable_scope('v_LSTM'):\n v_output, v_LSTM_states = self.v_LSTM_cell(vid_input[:,idx,:], v_LSTM_states)\n with tf.variable_scope('t_LSTM'):\n _, t_LSTM_states = self.t_LSTM_cell(tf.concat([padding, v_output], 1), t_LSTM_states)\n \n null_video = tf.zeros([batch_size, self.feat_size])\n for idx in range(self.sent_len):\n tf.get_variable_scope().reuse_variables()\n if idx == 0:\n caption_embed = tf.nn.embedding_lookup(self.word_embed, tf.ones([batch_size], dtype=tf.int64))\n # Decoder network\n with tf.variable_scope('v_LSTM'):\n v_output, v_LSTM_states = self.v_LSTM_cell(null_video, v_LSTM_states) \n # pdb.set_trace()\n with tf.variable_scope('t_LSTM'):\n t_output, t_LSTM_states = self.t_LSTM_cell(tf.concat([caption_embed, v_output], 1), t_LSTM_states)\n logit_output = tf.nn.xw_plus_b(t_output, self.t_output_W, self.t_output_b)\n \n # Produce output\n # pdb.set_trace()\n max_prob_index = tf.argmax(logit_output, 1)\n outputs.append(max_prob_index)\n\n caption_embed = tf.nn.embedding_lookup(self.word_embed, max_prob_index)\n # caption_embed = tf.expand_dims(caption_embed, 0)\n \n return dict(\n x = vid_input,\n outputs = outputs\n )", "def test_lstm_batch_first(self):\n\n class SimpleBatchFirstLSTM(nn.Module):\n def __init__(self):\n super(SimpleBatchFirstLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(12, 10, 1, batch_first=True)\n w2 = torch.randn(40, 10)\n w1 = torch.randn(40, 12)\n b1 = torch.randn(40)\n b2 = torch.randn(40)\n self.rnn.training = False\n self.rnn.weight_ih_l0 = 
torch.nn.Parameter(w1)\n self.rnn.weight_hh_l0 = torch.nn.Parameter(w2)\n self.rnn.bias_ih_l0 = torch.nn.Parameter(b1)\n self.rnn.bias_hh_l0 = torch.nn.Parameter(b2)\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(3, 10, 12)\n h = torch.randn(1, 3, 10)\n c = torch.randn(1, 3, 10)\n model = SimpleBatchFirstLSTM()\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def _main():\n\n # define batch_size, channels, height, width\n batch_size, channels, height, width = 64, 2, 30, 30\n hidden_size = 64 # 64 # hidden state size\n lr = 1e-5 # learning rate\n n_frames = 11 # sequence length\n max_epoch = 30 # number of epochs\n\n convlstm_dataset = convLSTM_Dataset_dxdy(dataset_dir='../dataset/resample_skipping_stride1',\n n_class=4,\n transform=transforms.Compose([\n RandomHorizontalFlip(),\n RandomVerticalFlip(),\n ToTensor(),\n ])\n )\n\n\n train_sampler, test_sampler = random_split_customized(convlstm_dataset, train_ratio=0.9)\n\n train_dataloader = DataLoader(convlstm_dataset, batch_size=batch_size, sampler=train_sampler,\n num_workers=4)\n test_dataloader = DataLoader(convlstm_dataset, batch_size=batch_size, sampler=test_sampler,\n num_workers=4)\n\n test_size = len(test_sampler)\n for n_frames_ahead in range(1, 6):\n print('Instantiating model.............')\n model = ConvLSTMChained(n_frames_ahead=n_frames_ahead, n_frames=n_frames)\n print(repr(model))\n\n # print model.state_dict()\n\n # load pretrained_model_diction\n path_pred = './saved_model/convlstm_frame_predict_20190415_400epochs_4000data_flipped_{}f_ahead.pth'.format(n_frames_ahead)\n path_detect = './saved_model/convlstm__model_1layer_augmented_11frames_400epochs_20190415.pth'\n\n path_dict = {'pred_net': path_pred, 'detect_net': path_detect}\n\n load_state_dict(model, path_dict)\n\n # IPython.embed()\n\n if torch.cuda.is_available():\n # print 'sending model to GPU'\n model = model.cuda()\n\n print('Create input and target Variables')\n x = Variable(torch.rand(n_frames, batch_size, channels, height, width))\n # y = Variable(torch.randn(T, b, d, h, w))\n y = Variable(torch.rand(batch_size))\n\n print('Create a MSE criterion')\n loss_fn = nn.CrossEntropyLoss()\n # optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0.01)\n\n # IPython.embed()\n\n\n\n\n import time\n\n model = model.eval()\n\n test_loss = 0\n n_right = 0\n\n start = time.time()\n for test_step, test_sample_batched in enumerate(test_dataloader):\n\n start = time.time()\n\n model.output_list = {'pred': [], 'detect': []}\n\n x = test_sample_batched['frames']\n y = test_sample_batched['target']\n x = torch.transpose(x, 0, 1)\n # x = x.type(torch.FloatTensor)\n\n if torch.cuda.is_available():\n # print 'sending input and target to GPU'\n x = x.type(torch.cuda.FloatTensor)\n y = y.type(torch.cuda.FloatTensor)\n\n prev = {'pred': None, 'detect': None}\n\n for t in range(0, n_frames):\n out_test, prev = model(t, x[t], prev)\n\n y = y.long()\n\n test_loss += loss_fn(out_test, y).item() * batch_size\n # Compute accuracy\n _, argmax_test = torch.max(out_test, 1)\n # print argmax_test\n # print y\n n_right += sum(y == argmax_test.squeeze()).item()\n\n # print '[TIME] the forward time: {}'.format(time.time() - start)\n # print n_right\n test_loss_reduced = test_loss / test_size\n test_accuracy = float(n_right) / test_size\n\n print ('[ TEST set] Step {}, Loss: {:.6f}, Acc: {:.4f}'.format(\n test_step + 1, test_loss_reduced, test_accuracy))", "def 
test_model(epoch):\n model.eval()\n test_metrics = {\"loss\": [], \"acc\": []}\n timer = Timer()\n for batch_i, (X, y) in enumerate(test_dataloader):\n batch_i += 1\n image_sequences = Variable(X.to(device), requires_grad=False)\n labels = Variable(y, requires_grad=False).to(device)\n\n with torch.no_grad():\n # Reset LSTM hidden state\n model.lstm.reset_hidden_state()\n # Get sequence predictions\n predictions = model(image_sequences)\n\n # Compute metrics\n loss = criterion(predictions, labels)\n acc = (predictions.detach().argmax(1) == labels).cpu().numpy().mean()\n\n # Keep track of loss and accuracy\n test_metrics[\"loss\"].append(loss.item())\n test_metrics[\"acc\"].append(acc)\n\n # Determine approximate time left\n batches_done = batch_i - 1\n batches_left = len(test_dataloader) - batches_done\n time_left = datetime.timedelta(seconds=batches_left * timer.seconds())\n time_iter = round(timer.seconds(), 3)\n timer.reset()\n\n # Log test performance\n logger.info(\n f'Testing - [Epoch: {epoch}/{cfg.train.num_epochs}] [Batch: {batch_i}/{len(test_dataloader)}] [Loss: {np.mean(test_metrics[\"loss\"]):.3f}] [Acc: {np.mean(test_metrics[\"acc\"]):.3f}] [ETA: {time_left}] [Iter time: {time_iter}s/it]'\n )\n\n writer.add_scalar(\"test/loss\", np.mean(test_metrics[\"loss\"]), epoch)\n writer.add_scalar(\"test/acc\", np.mean(test_metrics[\"acc\"]), epoch)\n\n model.train()", "def test_model_with_lstm_layer_sigmoid(self):\n\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Add an RNN layer with 12 internal units.\n x = tf.keras.layers.LSTM(12, recurrent_activation='sigmoid', name='lstm0')(inputs)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./lstm_sigmoid', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n # there should be only 4 connected graph ops, input, LSTM , Dense and Softmax\n self.assertEqual(4, len(conn_graph.get_all_ops()))\n lstm_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'LSTM':\n lstm_detected = True\n inner_list = op.internal_ops\n self.assertEqual(77, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('lstm0/while/MatMul'))\n self.assertEqual('lstm0', op.name)\n self.assertTrue(lstm_detected)\n\n valid_matmuls = []\n valid_muls = []\n valid_bias_add = []\n valid_activation = []\n for op in inner_list:\n if op.type == 'MatMul' and op not in valid_matmuls:\n valid_matmuls.append(op)\n if op.type == 'Mul' and op not in valid_matmuls:\n valid_muls.append(op)\n if op.type == 'BiasAdd' and op not in valid_bias_add:\n valid_bias_add.append(op)\n if op.type == 'Sigmoid' and op not in valid_activation:\n valid_activation.append(op)\n\n self.assertEqual(8, len(valid_matmuls))\n self.assertEqual(4, len(valid_muls))\n self.assertEqual(4, len(valid_bias_add))\n self.assertEqual(3, len(valid_activation))", "def unit_test():\n test_net = SqueezeNetSqueezeLSTM(20, 6)\n test_net_output = test_net(\n Variable(torch.randn(5, 36, 94, 168)),\n Variable(torch.randn(5, 8, 23, 41)))\n logging.debug('Net Test Output = {}'.format(test_net_output))\n logging.debug('Network was Unit Tested')\n print(test_net.num_params())\n # for param in test_net.parameters():", "def plr_slr(bs_seq_len_list, alg):\n import tensorflow 
as tf\n import numpy as np\n import scipy.io.wavfile\n from tensorflow.contrib import rnn\n import math\n import os\n import sys\n import time\n import os\n import random\n\n throughput_list = []\n\n #TODO:\n #Make LS_LSTM with PLR\n #Make SRU with PLR\n #Make QRNN with PLR\n #Make LS_LSTM with SLR\n #Make SRU with SLR\n #Make QRNN with SLR\n \n\n for seq_len in seq_len_list:\n #First generate the LS-LSTM and work out the throughput\n tf.reset_default_graph() \n n_hidden = 256\n n_classes = 2\n n_steps = seq_len\n batch_size = 65536 // seq_len\n bs = batch_size\n print(\"Batch size is {} and sequence length is {}\".format(bs, seq_len))\n n_input = 24\n n_layers = 2\n forget_gate_init = 1.0 # = 1/(n_in). We use uniform p(x)\n #Training Parameters\n sn = 1.0 / math.sqrt(n_hidden)\n learning_rate = 0.001\n training_iters = 5000000\n\n x = tf.placeholder(\"float\", [n_steps, batch_size, n_input])\n y = tf.placeholder(\"float\", [batch_size, n_classes])\n tf.get_variable_scope().reuse == True\n W1 = tf.get_variable('W1', initializer=\n tf.random_normal([n_hidden, n_classes]), dtype='float')\n b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')\n\n layer1 = linear_surrogate_lstm(x, n_hidden, alg=alg, name='ls-lstm')\n outputs = linear_surrogate_lstm(layer1, n_hidden, alg=alg, name='ls-lstm2') \n pred = tf.matmul(outputs[-1], W1) + b1\n #Evaluate network, run adam and clip gradients\n ################################################################################\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))\n gradients = raw_gradients\n optimizer = optimizer_0.apply_gradients(zip(gradients, variables))\n init = tf.global_variables_initializer()\n\n #Initialise the model and evaluate\n step = 0\n times = []\n x_in = np.random.random((n_steps, batch_size, n_input))\n y_in = np.random.random((batch_size, n_classes))\n with tf.device(\"gpu:0\"):\n with tf.Session() as sess:\n sess.run(init)\n while step < 10:\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n step += 1\n if step != 0:\n start = time.time()\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n finish = time.time()\n times.append(finish - start)\n ls_lstm_tp = (bs * n_steps) / np.mean(times)\n\n\n\n tf.reset_default_graph() \n x = tf.placeholder(\"float\", [n_steps, batch_size, n_input])\n y = tf.placeholder(\"float\", [batch_size, n_classes])\n tf.get_variable_scope().reuse == True\n W1 = tf.get_variable('W1', initializer=\n tf.random_normal([n_hidden, n_classes]), dtype='float')\n b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')\n layer1 = linear_surrogate_lstm(x, n_hidden, alg=Alg.SERIAL_BASELINE, name='ls-lstm')\n output = linear_surrogate_lstm(layer1, n_hidden, alg=Alg.SERIAL_BASELINE, name='ls-lstm') \n pred = tf.matmul(output[-1], W1) + b1\n\n #Evaluate network, run adam and clip gradients\n ################################################################################\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))\n gradients = raw_gradients\n optimizer = optimizer_0.apply_gradients(zip(gradients, variables))\n init = tf.global_variables_initializer()\n\n #Initialise the model and evaluate\n step = 0\n times = 
[]\n x_in = np.random.random((n_steps, batch_size, n_input))\n y_in = np.random.random((batch_size, n_classes))\n with tf.device(\"gpu:0\"):\n with tf.Session() as sess:\n sess.run(init)\n while step < 10:\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n step += 1\n if step != 0:\n start = time.time()\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n finish = time.time()\n times.append(finish - start)\n s_ls_lstm_tp = (bs * n_steps) / np.mean(times)\n\n # throughput_list.append([ls_lstm_tp, s_ls_lstm_tp])\n # continue\n\n\n tf.reset_default_graph() \n x = tf.placeholder(\"float\", [n_steps, batch_size, n_input])\n y = tf.placeholder(\"float\", [batch_size, n_classes])\n tf.get_variable_scope().reuse == True\n W1 = tf.get_variable('W1', initializer=\n tf.random_normal([n_input, n_classes]), dtype='float')\n b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')\n layer1 = SRU(x, alg=alg, name='SRU_1')\n output = SRU(layer1, alg=alg, name='SRU_2')\n pred = tf.matmul(output[-1], W1) + b1 \n\n #Evaluate network, run adam and clip gradients\n ################################################################################\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))\n gradients = raw_gradients\n optimizer = optimizer_0.apply_gradients(zip(gradients, variables))\n init = tf.global_variables_initializer()\n\n #Initialise the model and evaluate\n step = 0\n times = []\n x_in = np.random.random((n_steps, batch_size, n_input))\n y_in = np.random.random((batch_size, n_classes))\n with tf.device(\"gpu:0\"):\n with tf.Session() as sess:\n sess.run(init)\n while step < 10:\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n step += 1\n if step != 0:\n start = time.time()\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n finish = time.time()\n times.append(finish - start)\n sru_tp = (bs * n_steps) / np.mean(times) \n\n # Serial SRU\n tf.reset_default_graph() \n x = tf.placeholder(\"float\", [n_steps, batch_size, n_input])\n y = tf.placeholder(\"float\", [batch_size, n_classes])\n tf.get_variable_scope().reuse == True\n W1 = tf.get_variable('W1', initializer=\n tf.random_normal([n_input, n_classes]), dtype='float')\n b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')\n layer1 = SRU(x, alg=Alg.SERIAL_BASELINE, name='s_SRU_1')\n output = SRU(layer1, alg=Alg.SERIAL_BASELINE, name='s_SRU_2')\n pred = tf.matmul(output[-1], W1) + b1 \n\n #Evaluate network, run adam and clip gradients\n ################################################################################\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))\n gradients = raw_gradients\n optimizer = optimizer_0.apply_gradients(zip(gradients, variables))\n init = tf.global_variables_initializer()\n\n #Initialise the model and evaluate\n step = 0\n times = []\n x_in = np.random.random((n_steps, batch_size, n_input))\n y_in = np.random.random((batch_size, n_classes))\n with tf.device(\"gpu:0\"):\n with tf.Session() as sess:\n sess.run(init)\n while step < 10:\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n step += 1\n if step != 0:\n start = time.time()\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n finish = time.time()\n 
times.append(finish - start)\n s_sru_tp = (bs * n_steps) / np.mean(times)\n\n\n ########################################## QRNN 2 \n\n tf.reset_default_graph() \n x = tf.placeholder(\"float\", [n_steps, batch_size, n_input])\n y = tf.placeholder(\"float\", [batch_size, n_classes])\n tf.get_variable_scope().reuse == True\n W1 = tf.get_variable('W1', initializer=\n tf.random_normal([n_input, n_classes]), dtype='float')\n b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')\n layer1 = QRNN(x, 2, alg=alg, name='QRNN_1')\n output = QRNN(layer1, 2, alg=alg, name='QRNN_2')\n pred = tf.matmul(output[-1], W1) + b1\n\n #Evaluate network, run adam and clip gradients\n ################################################################################\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))\n gradients = raw_gradients\n optimizer = optimizer_0.apply_gradients(zip(gradients, variables))\n init = tf.global_variables_initializer()\n\n #Initialise the model and evaluate\n step = 0\n times = []\n x_in = np.random.random((n_steps, batch_size, n_input))\n y_in = np.random.random((batch_size, n_classes))\n with tf.device(\"gpu:0\"):\n with tf.Session() as sess:\n sess.run(init)\n while step < 10:\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n step += 1\n if step != 0:\n start = time.time()\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n finish = time.time()\n times.append(finish - start)\n qrnn_2_tp = (bs * n_steps) / np.mean(times)\n\n\n # Serial QRNN 2\n tf.reset_default_graph() \n x = tf.placeholder(\"float\", [n_steps, batch_size, n_input])\n y = tf.placeholder(\"float\", [batch_size, n_classes])\n tf.get_variable_scope().reuse == True\n W1 = tf.get_variable('W1', initializer=\n tf.random_normal([n_input, n_classes]), dtype='float')\n b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')\n layer1 = QRNN(x, 2, alg=Alg.SERIAL_BASELINE, name='s_QRNN_3')\n output = QRNN(layer1, 2, alg=Alg.SERIAL_BASELINE, name='s_QRNN_4')\n pred = tf.matmul(output[-1], W1) + b1\n\n #Evaluate network, run adam and clip gradients\n ################################################################################\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))\n gradients = raw_gradients\n optimizer = optimizer_0.apply_gradients(zip(gradients, variables))\n init = tf.global_variables_initializer()\n\n #Initialise the model and evaluate\n step = 0\n times = []\n x_in = np.random.random((n_steps, batch_size, n_input))\n y_in = np.random.random((batch_size, n_classes))\n with tf.device(\"gpu:0\"):\n with tf.Session() as sess:\n sess.run(init)\n while step < 10:\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n step += 1\n if step != 0:\n start = time.time()\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n finish = time.time()\n times.append(finish - start)\n s_qrnn_2_tp = (bs * n_steps) / np.mean(times)\n print(np.mean(times))\n print(np.std(times))\n\n\n ########################################## QRNN 10\n\n tf.reset_default_graph() \n x = tf.placeholder(\"float\", [n_steps, batch_size, n_input])\n y = tf.placeholder(\"float\", [batch_size, n_classes])\n tf.get_variable_scope().reuse == True\n W1 = 
tf.get_variable('W1', initializer=\n tf.random_normal([n_input, n_classes]), dtype='float')\n b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')\n layer1 = QRNN(x, 10, alg=alg, name='QRNN_2')\n output = QRNN(layer1, 10, alg=alg, name='QRNN_6')\n pred = tf.matmul(output[-1], W1) + b1\n\n #Evaluate network, run adam and clip gradients\n ################################################################################\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))\n gradients = raw_gradients\n optimizer = optimizer_0.apply_gradients(zip(gradients, variables))\n init = tf.global_variables_initializer()\n\n #Initialise the model and evaluate\n step = 0\n times = []\n x_in = np.random.random((n_steps, batch_size, n_input))\n y_in = np.random.random((batch_size, n_classes))\n with tf.device(\"gpu:0\"):\n with tf.Session() as sess:\n sess.run(init)\n while step < 10:\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n step += 1\n if step != 0:\n start = time.time()\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n finish = time.time()\n times.append(finish - start)\n qrnn_10_tp = (bs * n_steps) / np.mean(times)\n\n\n # Serail QRNN 10\n tf.reset_default_graph() \n x = tf.placeholder(\"float\", [n_steps, batch_size, n_input])\n y = tf.placeholder(\"float\", [batch_size, n_classes])\n tf.get_variable_scope().reuse == True\n W1 = tf.get_variable('W1', initializer=\n tf.random_normal([n_input, n_classes]), dtype='float')\n b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')\n layer1 = QRNN(x, 10, alg=Alg.SERIAL_BASELINE, name='s_QRNN_7')\n output = QRNN(layer1, 10, alg=Alg.SERIAL_BASELINE, name='s_QRNN_8')\n pred = tf.matmul(output[-1], W1) + b1\n\n #Evaluate network, run adam and clip gradients\n ################################################################################\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)\n raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))\n gradients = raw_gradients\n optimizer = optimizer_0.apply_gradients(zip(gradients, variables))\n init = tf.global_variables_initializer()\n\n #Initialise the model and evaluate\n step = 0\n times = []\n x_in = np.random.random((n_steps, batch_size, n_input))\n y_in = np.random.random((batch_size, n_classes))\n with tf.device(\"gpu:0\"):\n with tf.Session() as sess:\n sess.run(init)\n while step < 10:\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n step += 1\n if step != 0:\n start = time.time()\n out = sess.run(pred, feed_dict={x: x_in, y: y_in})\n finish = time.time()\n times.append(finish - start)\n s_qrnn_10_tp = (bs * n_steps) / np.mean(times)\n\n \n throughput_list.append([ls_lstm_tp, s_ls_lstm_tp, \n sru_tp, s_sru_tp, \n qrnn_2_tp, s_qrnn_2_tp,\n qrnn_10_tp, s_qrnn_10_tp])\n return throughput_list", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, 
learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def main():\n\n batch_size = 30\n feature_size = 33\n hidden_dim = 128\n n_layers = 2\n out_dim = 6\n\n # EXP = f\"LSTM_genre6_all_38\"\n #EXP = \"dim256_layer3\"\n EXP = f\"ExperimentalRNN_genre6_cqt_33_batch30\"\n\n print(\"Preprocessing all data from scratch....\")\n dev_dataset = MusicDataset(f\"./data/adev6_cqt_33_128_feature.pkl\",\n f\"./data/adev6_cqt_33_128_target.pkl\")\n train_dataset = MusicDataset(f\"./data/atrain6_cqt_33_128_feature.pkl\",\n f\"./data/atrain6_cqt_33_128_target.pkl\")\n test_dataset = MusicDataset(f\"./data/atest6_cqt_33_128_feature.pkl\",\n f\"./data/atest6_cqt_33_128_target.pkl\")\n\n dev_generator = DataLoader(dataset=dev_dataset, batch_size=batch_size, shuffle=True)\n train_generator = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n test_generator = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)\n\n print(\"build model\")\n # use GPU or CPU\n if USE_CUDA:\n device = torch.device(\"cuda\")\n print(\"GPU is available\")\n else:\n device = torch.device(\"cpu\")\n print(\"GPU not available, CPU used\")\n\n print(EXP)\n\n\n #model = models.LSTMNet(input_dim=feature_size, hidden_dim=hidden_dim,\n # batch_size=batch_size, output_dim=out_dim, num_layers=n_layers)\n\n model = models.ExperimentalRNN(input_dim=feature_size, hidden_dim=hidden_dim,\n output_dim=out_dim, num_layers=n_layers)\n\n # learning rate\n lr = 0.001\n # loss function\n loss_fn = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n if os.path.exists(f'./models/{EXP}_model.pth'):\n trained_model = torch.load(f\"./models/{EXP}_model.pth\")\n\n else:\n model.to(device)\n trained_model = train_model(model, loss_fn, optimizer, train_generator, dev_generator, EXP)\n torch.save(trained_model, f\"./models/{EXP}_model.pth\")\n\n test_model(trained_model, loss_fn, test_generator)", "def LSTM_train(X_train, Y_train, X_dev, Y_dev, R_train, R_dev, hyperparams):", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, 
W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerSetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n tofov(v, shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for bwd connections\n W_bwd_conc = tf.concat(axis=1, values=[W_ci, W_ig, W_og, W_fg])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [None, None, None, None]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W_ci, W_ig, W_og, W_fg]))\n \n self.W_fwd_conc = None\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerGetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n 
var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. - output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name\n \n self.cur_net_fwd = dot_product(tf.zeros(self.incoming_shape[:1] + self.incoming_shape[2:]),\n tf.zeros(self.W_fwd_conc.shape.as_list()))", "def runGRUEncoder(self, encoder, num_layers):\n\n inputs_ph = tf.placeholder(\n dtype=tf.float32,\n shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH))\n inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None))\n\n outputs, states = encoder.encode(\n mode=tf.estimator.ModeKeys.TRAIN,\n sequence_inputs=inputs_ph,\n sequence_length=inputs_length_ph)\n\n num_bi_layers = 1\n num_uni_layers = num_layers - num_bi_layers\n\n if num_uni_layers == 1:\n states_bi_bw, states_uni = states\n # states_bi_bw = (states_bi_bw,)\n self.assertEqual(1, len(states_bi_bw))\n self.assertEqual(num_uni_layers, len(states_uni))\n\n # unlike lstm, whose states is a tuple of (c,h),\n # gru states has only one element\n # states_bi_bw[0] is a states tensor\n states_list = [states_bi_bw[0]]\n for i in range(num_uni_layers):\n states_list.append(states_uni[i])\n states = tf.convert_to_tensor(states_list)\n else:\n states_uni = states\n self.assertEqual(num_uni_layers, len(states_uni))\n states_list = []\n for i in range(num_uni_layers):\n states_list.append(states_uni[i])\n states = tf.convert_to_tensor(states_list)\n\n inputs, inputs_length = common_utils.get_encoder_test_inputs()\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n outputs, states = sess.run(\n [outputs, states],\n feed_dict={\n inputs_ph: inputs,\n inputs_length_ph: 
inputs_length\n })\n\n self.assertAllEqual(\n [common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH],\n outputs.shape)\n\n if num_uni_layers == 1:\n self.assertEqual(num_layers, len(states))\n self.assertAllEqual(\n [num_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],\n states.shape)\n else:\n self.assertEqual(num_uni_layers, len(states))\n self.assertAllEqual(\n [num_uni_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],\n states.shape)", "def __init__(self, incoming, n_units, W_ci, W_ig, W_og, W_fg,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.nn.elu,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True, comb='add',\n store_states=False, return_states=False, output_dropout=False, precomp_fwds=True, W_red_rec=None,\n a_reduce_recurrents=None, tickerstep_biases=None, learn_tickerstep_biases=True,\n dilation_rate=(1, 1), name='ConvLSTMLayer'):\n super(ConvLSTMLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Check if feature dimensions to produce agree for all weight windows with n_units\n W_dims = [[w[0].get_shape().as_list()[3], w[1].get_shape().as_list()[3]] for w in [W_ci, W_ig, W_og, W_fg]]\n W_dims = np.array(W_dims).flatten()\n if np.any(W_dims != n_units):\n raise ValueError(\"Feature dimensions to produce must agree with n_units!\")\n \n # TODO: make all gates optional (list with keys and function for splitting)\n if not forgetgate:\n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [[tofov(w, shape=None, var_params=dict(name=n + suffix)) for w, suffix in\n zip(v, ['_fwd', '_bwd'])] for v, n in\n zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(b, shape=[n_units], var_params=dict(name=n)) for b, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections by concatenating them at sliding mask feature dimension\n # TODO: enable parallel calculation on multi-gpu\n W_fwd_conc = tf.concat(axis=3, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]], name='W_fwd_conc')\n W_bwd_conc = tf.concat(axis=3, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]], name='W_bwd_conc')\n \n # Initialize kernel for reducing recurrent features\n self.reduce_recurrents = None\n self.W_red_rec = W_red_rec\n if a_reduce_recurrents is not None:\n self.W_red_rec = tofov(W_red_rec, var_params=dict(name='W_red_rec'))\n \n def reduce_recurrents(h_prev):\n \"\"\"Reduces features of internal recurrent connections h_prev\"\"\"\n return a_reduce_recurrents(conv2d(h_prev, self.W_red_rec))\n \n self.reduce_recurrents = reduce_recurrents\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output 
dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[2], output_shape[3], output_shape[4]],\n dtype=tf.float32, name='out_do_mask')\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. - output_dropout,\n noise_shape=[1, 1, 1, output_shape[4]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (outputs) and c (cell states) as Variable if overwriteable or tensor if not\n # shape=(samples, x, y, n_units)\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[2], output_shape[3], output_shape[4]],\n var_params=dict(trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[2], output_shape[3], output_shape[4]],\n var_params=dict(trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states\n h = [h_init] # [-1, x, y, n_units]\n c = [c_init] # [-1, x, y, n_units]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(ci=a_ci, ig=a_ig, og=a_og, fg=a_fg, out=a_out)\n self.b = OrderedDict(ci=b_ci, ig=b_ig, og=b_og, fg=b_fg)\n self.h = h\n self.c = c\n self.comb = comb\n self.max_seq_len = None\n self.external_rec = None\n \n self.dilation_rate = dilation_rate\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def TrainNetwork(self):\n\n self.logger.info('Train Network')\n self.netWork.TrainGenerator()\n\n # # train NetworkLSTM\n self.logger.info('Train NetworkLSTM')\n self.netWork.TrainLSTM()", "def __build_mol_to_latent_model(self):\n\n # Input tensor (MANDATORY)\n encoder_inputs = Input(shape=self.input_shape, name=\"Encoder_Inputs\")\n\n x = encoder_inputs\n\n # The two encoder layers, number of cells are halved as Bidirectional\n encoder = Bidirectional(\n LSTM(\n self.lstm_dim // 2,\n return_sequences=True,\n return_state=True, # Return the states at end of the batch\n name=\"Encoder_LSTM_1\",\n )\n )\n\n x, state_h, state_c, state_h_reverse, state_c_reverse = encoder(x)\n\n if self.bn:\n x = BatchNormalization(momentum=self.bn_momentum, name=\"BN_1\")(x)\n\n encoder2 = Bidirectional(\n LSTM(\n self.lstm_dim // 2,\n return_state=True, # Return the states at end of the batch\n name=\"Encoder_LSTM_2\",\n )\n )\n\n _, state_h2, state_c2, state_h2_reverse, state_c2_reverse = encoder2(x)\n\n # Concatenate all states of the forward and the backward LSTM layers\n states = Concatenate(axis=-1, name=\"Concatenate_1\")(\n [\n state_h,\n state_c,\n state_h2,\n state_c2,\n state_h_reverse,\n state_c_reverse,\n state_h2_reverse,\n state_c2_reverse,\n ]\n )\n\n if self.bn:\n states = BatchNormalization(momentum=self.bn_momentum, name=\"BN_2\")(states)\n\n # A non-linear recombination\n neck_relu = Dense(\n self.codelayer_dim, activation=self.h_activation, name=\"Codelayer_Relu\"\n )\n neck_outputs = 
neck_relu(states)\n\n if self.bn:\n neck_outputs = BatchNormalization(\n momentum=self.bn_momentum, name=\"BN_Codelayer\"\n )(neck_outputs)\n\n # Add Gaussian noise to \"spread\" the distribution of the latent variables during training\n neck_outputs = GaussianNoise(self.noise_std, name=\"Gaussian_Noise\")(\n neck_outputs\n )\n\n # Define the model\n self.__mol_to_latent_model = Model(encoder_inputs, neck_outputs)\n\n # Name it!\n self.mol_to_latent_model.name = \"mol_to_latent_model\"", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"learning_rate\": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n\n # daasegt config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n\n environment.init(\"test_lm_bisenet\")\n prepare_dirs(recreate=True)\n start_training(config, profile_step=1)", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # 
d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # 
f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def test_deepconvlstm_enough_batchnorm(self):\n model = modelgen.generate_DeepConvLSTM_model(\n (None, 20, 3), 2, [32, 32, 32], [32, 32, 32])\n batch_norm_layers = len(\n [l for l in model.layers if 'BatchNormalization' in str(l)])\n activation_layers = len(\n [l for l in model.layers if 'Activation' in str(l)])\n assert batch_norm_layers == activation_layers", "def train():\n\n # Load camera parameters\n rcams = cameras.load_cameras()\n\n # Load 3d data and 2d projections\n full_train_set_3d, full_test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d =\\\n data_utils.read_3d_data( FLAGS.camera_frame, rcams, FLAGS.origin_bc, FLAGS.augment_data,\n FLAGS.procrustes, FLAGS.lowpass )\n \n # Read stacked hourglass 2D predictions\n full_train_set_2d, full_test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = \\\n data_utils.read_2d_predictions( FLAGS.origin_bc, FLAGS.augment_data )\n \n print(\"\\n[+] done reading and normalizing data\")\n # Getting the number of training and test subjects\n tr_subj = 0\n for v in full_train_set_3d.values():\n tr_subj += v.shape[0]\n te_subj = 0\n for v in full_test_set_3d.values():\n te_subj += v.shape[0]\n print(\"{0} training subjects, {1} test subjects\".format(tr_subj, te_subj))\n print(dim_to_use_2d)\n print(dim_to_use_3d)\n # Un-normalizing data for visualizations\n unNorm_ftrs2d = data_utils.unNormalize_dic(full_train_set_2d, data_mean_2d, data_std_2d, dim_to_use_2d)\n unNorm_ftrs3d = data_utils.unNormalize_dic(full_train_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n unNorm_ftes3d = data_utils.unNormalize_dic(full_test_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n # Visualize the data\n viz.visualize_train_sample(unNorm_ftrs2d, unNorm_ftrs3d, FLAGS.camera_frame)\n viz.visualize_files_oneatatime(unNorm_ftrs3d, unNorm_ftes3d)\n\n # Getting only the dimensions to use (get rid of body coxas, other limb, antennas, abdomen\n train_set_3d, train_set_2d, test_set_3d, test_set_2d = {}, {}, {}, {}\n for k in full_train_set_3d:\n (f, c) = k\n train_set_3d[k] = full_train_set_3d[k][:, dim_to_use_3d]\n train_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_train_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n for k in full_test_set_3d:\n (f, c) = k\n test_set_3d[k] = full_test_set_3d[k][:, dim_to_use_3d]\n test_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_test_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n \n print(\"3D data mean:\")\n print(data_mean_3d)\n print(\"3D data std:\")\n print(data_std_3d)\n\n print(\"2D data mean:\")\n print(data_mean_2d)\n print(\"2D data std:\")\n print(data_std_2d)\n \n input(\"Press Enter to continue...\")\n\n # Avoid using the GPU if requested\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(\n device_count=device_count,\n allow_soft_placement=True )) as sess:\n\n # === Create the model ===\n print(\"[*] creating %d bi-layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model( sess, FLAGS.batch_size )\n model.train_writer.add_graph( sess.graph )\n print(\"[+] model created\")\n \n #=== This is the training loop ===\n step_time, loss, val_loss = 0.0, 0.0, 0.0\n current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1\n previous_losses = []\n\n step_time, loss = 0, 0\n current_epoch = 0\n log_every_n_batches = 100\n losses, errors, 
joint_errors = [], [], []\n for _ in range( FLAGS.epochs ):\n current_epoch = current_epoch + 1\n\n # === Load training batches for one epoch ===\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( train_set_2d, train_set_3d, FLAGS.camera_frame, training=True )\n nbatches = len( encoder_inputs )\n print(\"[*] there are {0} train batches\".format( nbatches ))\n start_time, loss = time.time(), 0.\n # === Loop through all the training batches ===\n for i in range( nbatches ):\n\n if (i+1) % log_every_n_batches == 0:\n # Print progress every log_every_n_batches batches\n print(\"Working on epoch {0}, batch {1} / {2}...\".format( current_epoch, i+1, nbatches),end=\"\" )\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n step_loss, loss_summary, lr_summary, _ =\\\n model.step( sess, enc_in, dec_out, FLAGS.dropout, isTraining=True )\n\n if (i+1) % log_every_n_batches == 0:\n # Log and print progress every log_every_n_batches batchespixels = pixels / pixels[2,:]\n model.train_writer.add_summary( loss_summary, current_step )\n model.train_writer.add_summary( lr_summary, current_step )\n step_time = (time.time() - start_time)\n start_time = time.time()\n print(\"done in {0:.2f} ms\".format( 1000*step_time / log_every_n_batches ) )\n\n loss += step_loss\n current_step += 1\n # === end looping through training batches ===\n\n loss = loss / nbatches\n losses.append(loss)\n print(\"=============================\\n\"\n \"Global step: %d\\n\"\n \"Learning rate: %.2e\\n\"\n \"Train loss avg: %.4f\\n\"\n \"=============================\" % (model.global_step.eval(),\n model.learning_rate.eval(), loss) )\n # === End training for an epoch ===\n\n # === Testing after this epoch ===\n isTraining = False\n \n n_joints = len(data_utils.DIMENSIONS_TO_USE)\n if FLAGS.origin_bc:\n n_joints -= len(data_utils.ROOT_POSITIONS)\n\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( test_set_2d, test_set_3d, FLAGS.camera_frame, training=False)\n\n total_err, coordwise_err, joint_err, step_time, loss = evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n current_step, encoder_inputs, decoder_outputs, current_epoch )\n\n print(\"=============================\\n\"\n \"Step-time (ms): %.4f\\n\"\n \"Val loss avg: %.4f\\n\"\n \"Val error avg (mm): %.2f (%.2f, %.2f, %.2f)\\n\"\n \"=============================\" % ( 1000*step_time, loss, total_err,\n coordwise_err[0], coordwise_err[1], coordwise_err[2] ))\n\n for i in range(n_joints):\n # 6 spaces, right-aligned, 5 decimal places\n print(\"Error in joint {0:02d} (mm): {1:>5.2f}\".format(i+1, joint_err[i]))\n print(\"=============================\")\n errors.append(coordwise_err)\n joint_errors.append(joint_err)\n # Log the error to tensorboard\n summaries = sess.run( model.err_mm_summary, {model.err_mm: total_err} )\n model.test_writer.add_summary( summaries, current_step )\n\n # Save the model\n print( \"Saving the model... 
\", end=\"\" )\n start_time = time.time()\n model.saver.save(sess, os.path.join(train_dir, 'checkpoint'), global_step=current_step )\n print( \"done in {0:.2f} ms\".format(1000*(time.time() - start_time)) )\n\n # Reset global time and loss\n step_time, loss = 0, 0\n\n sys.stdout.flush()\n # Save losses for future plots\n def print_list_tofile(l, filename):\n with open(filename, 'wb') as f:\n pickle.dump(l, f)\n print_list_tofile(losses, train_dir+\"/losses.pkl\")\n print_list_tofile(errors, train_dir+\"/errors.pkl\")\n print_list_tofile(joint_errors, train_dir+\"/joint_errors.pkl\")", "def test_model_with_simple_rnn_multiple_layers(self):\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Add an RNN layer with 12 internal units.\n x = tf.keras.layers.SimpleRNN(12, name='rnn0', activation='tanh', return_sequences=True)(inputs)\n x = tf.keras.layers.SimpleRNN(12, name='rnn1', activation='relu', return_sequences=True)(x)\n x = tf.keras.layers.SimpleRNN(12, name='rnn2', activation='tanh')(x)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax, name=\"matmul0\")(x)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./simple_rnn', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n # there should be only 4 connected graph ops, input, simpleRNN , Dense and Softmax\n self.assertEqual(6, len(conn_graph.get_all_ops()))\n num_detected_rnns = 0\n for op in conn_graph.get_all_ops().values():\n if op.type == 'SimpleRNN':\n num_detected_rnns += 1\n inner_list = op.internal_ops\n self.assertEqual(49, len(inner_list))\n self.assertEqual(3, num_detected_rnns)", "def lstm_model(nlstm=128, layer_norm=False):\n\n def network_fn(X, nenv=1, obs_size=-1):\n with tf.variable_scope(\"emb\", reuse=tf.AUTO_REUSE):\n w_emb = tf.get_variable(\"w_emb\", [obs_size+1, 32])\n X = tf.nn.embedding_lookup(w_emb, X)\n\n nbatch = X.shape[0]\n nsteps = nbatch // nenv\n\n h = tf.layers.flatten(X)\n\n M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)\n S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states\n\n xs = batch_to_seq(h, nenv, nsteps)\n ms = batch_to_seq(M, nenv, nsteps)\n\n assert not layer_norm\n h5, snew = lstm(xs, ms, S, scope='lstm', nh=nlstm)\n\n h = seq_to_batch(h5)\n initial_state = np.zeros(S.shape.as_list(), dtype=float)\n\n return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}\n\n return network_fn", "def testKerasLSTM(self):\n input_data = {\n \"x\":\n constant_op.constant(\n np.array(\n np.random.random_sample((10, 10, 10)), dtype=np.float32))\n }\n\n model = keras.models.Sequential(\n [keras.layers.LSTM(units=10, input_shape=(10, 10))])\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[10, 10, 10], dtype=dtypes.float32)\n ])\n def to_save(x):\n return model(x)\n\n root, output_func = self._freezeModel(to_save)\n self._testConvertedFunction(root, root.f, output_func, input_data)", "def test_single_pytorch_module_mapping_to_many_onnx_nodes(self):\n\n AimetLogger.set_level_for_all_areas(logging.DEBUG)\n\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model_name = 
'multilayer_lstm'\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n\n torch.onnx.export(model, dummy_input, './data/' + model_name + '.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/' + model_name + '.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/' + model_name + '.onnx')\n\n lstm_nodes = [node for node in onnx_model.graph.node if node.op_type == 'LSTM']\n assert 3 == len(lstm_nodes)\n\n node_to_io_dict, _ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n assert isinstance(node_to_io_dict['lstm#root_node'], list)\n assert 3 == len(node_to_io_dict['lstm#root_node'])", "def test_RLNAS1(self):\n port = 8773\n # config = [('MobileNetV2BlockSpace', {'block_mask': [0]})]\n config = [(\"ResNetBlockSpace2\", {\"block_mask\": [0]})]\n rlnas = RLNAS(\n key=\"lstm\",\n configs=config,\n server_addr=(\"\", port),\n is_sync=False,\n controller_batch_size=1,\n lstm_num_layers=1,\n hidden_size=10,\n temperature=1.0,\n save_controller=False,\n )\n input = paddle.static.data(name=\"input\", shape=[None, 3, 32, 32], dtype=\"float32\")\n archs = rlnas.next_archs(1)[0]\n for arch in archs:\n output = arch(input)\n input = output\n print(output)", "def model_DNN(x_train, y_train, x_test=None, y_test=None, kwargs={}):\n ######## RELU??? DropOut\n # create and fit the LSTM network\n # input_shape = Lookback x Features\n\n #simpler loss recorder? keras.callbacks.BaseLogger(stateful_metrics=None)\n\n from keras.optimizers import Adam, Nadam\n\n if kwargs.get('nodes', None) is None or kwargs.get('nodes', 0)==0 or kwargs.get('nodes', [0])==[0]:\n kwargs['nodes'] = [np.shape(x_train)[1]]\n elif isinstance(kwargs['nodes'] , (int, np.integer)): # turn int to list\n kwargs['nodes'] = [kwargs['nodes'] ]\n elif len(kwargs.get('nodes',[0])) < kwargs.get('layers', 1 ):\n kwargs['nodes'] = kwargs.get('nodes',[0])\n\n if kwargs.get('layers', 1 ) > 1 and len(kwargs.get('nodes')) < kwargs.get('layers',1):\n kwargs['nodes'] = list(np.pad(kwargs['nodes'] ,[0,kwargs.get('layers')-len(kwargs.get('nodes'))], mode='constant',constant_values=kwargs.get('nodes')[-1]))\n\n nodes = kwargs.get('nodes',[1])\n ndim = np.max([2,len(np.shape(x_train))]) # Min 2D\n if ndim==2:\n input_shape=(x_train.shape[1],)\n else:\n input_shape=(x_train.shape[1],x_train.shape[2])\n if np.ndim(y_train)==1:\n n_out = 1\n else:\n n_out = np.shape(y_train)[1] #e.g. 
onehot encoded.\n\n actvn = kwargs.get('actvn','tanh')\n if kwargs.get('onehot',False):\n actvl = kwargs.get('actvl','softmax')\n else:\n actvl = kwargs.get('actvl','tanh')\n\n if kwargs.get('bnorm', False):\n use_bias=False\n else:\n use_bias=True\n if kwargs.get('learning_rate', False):\n lr = kwargs.get('learning_rate')\n else:\n lr = False\n\n dropout = kwargs.get('dropout',False)\n\n\n model=[]\n model = Sequential() # https://keras.io/models/sequential/ sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n model.reset_states()\n if kwargs.get('layers',1)>1:\n for n in range(1,kwargs.get('layers')):\n if kwargs.get('verbose'): print('+adding extra layer')\n if kwargs.get('Model')=='AEP' and nodes[n-1]==np.min(nodes):\n # bottleneck!\n model.add(Dense(nodes[n-1], input_shape=input_shape, use_bias=use_bias,activation='linear', name=\"bottleneck\")) #https://www.cs.toronto.edu/~hinton/science.pdf\n else:\n model.add(Dense(nodes[n-1], input_shape=input_shape, use_bias=use_bias)) #kernel_initializer= http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf\n if kwargs.get('bnorm', False) : model.add(keras.layers.normalization.BatchNormalization())\n model.add(keras.layers.Activation(actvn))\n if dropout:\n model.add(Dropout(dropout)) #(c.f. Regularisation of Betas)\n\n\n # Add last output layer with 1 node for y\n if ndim>2:\n model.add(Flatten())\n model.add(Dense(nodes[n]*nodes[n], actvn))\n\n model.add(Dense(n_out, input_shape=input_shape, activation=actvl))\n\n #Fun Hack >> Require re/compile to become effective!\n if kwargs.get('freeze', False) and 'start_weights' in kwargs.keys():\n for layer in model.layers:\n layer.trainable = False\n\n\n #defaults = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n if hasattr(kwargs,'optimizer'):\n optimizer = kwargs['optimizer']\n elif lr:\n #optimizer = Adam(lr=lr, beta_1=0.875, beta_2=0.95, epsilon=1e-8, decay=0.01)# laterversion>>, amsgrad=False)\n optimizer = Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8, schedule_decay=0.004)\n\n else:\n #fails optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0)# laterversion>>, amsgrad=False)\n optimizer = 'adam'\n optimizer = Nadam(lr=0.005, beta_1=0.9, beta_2=0.990, epsilon=1e-8, schedule_decay=0.004)\n\n\n # lr_metric = get_lr_metric(optimizer)\n # from sklearn.metrics import r2_score glorot_normal\n\n if is_bool_dtype(y_train):\n model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) # , lr_metric\n if is_categorical_dtype(y_train) or kwargs.get('onehot',False):\n # Multiple Category\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n else:\n model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[r2_keras])\n\n if kwargs.get('verbose',False) > 1:\n model.summary()\n print(\"Inputs: {}\".format(model.input_shape))\n print(\"Outputs: {}\".format(model.output_shape))\n print(\"Actual input: {}\".format(x_train.shape))\n print(\"Actual output: {}\".format(y_train.shape))\n print('Model Loss: ' + model.loss)\n\n # For compatability with other models;\n model.score = model.evaluate\n\n return model #self.model=model", "def create_LSTM_LSTM_model(feats2d, shapes, model_settings, is_training):\n\n if is_training:\n dropout_prob = model_settings['dropout_prob'] \n\n # Get dimensions\n lstm_size = model_settings['lstm_size']\n\n batch_size = tf.shape(feats2d)[0] \n feats2d = tf.reshape(feats2d, 
shape=[batch_size,-1,model_settings['feature_width']]) # features are of shape [max seq length for batch, 40]\n seq_lengths = shapes[:,0] # all shapes are [seq_length, 40], we extract seq_length\n\n # First LSTM \n\n # LSTM cells\n cell_fw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n\n # Bi-directional RNN (+ Dropout)\n (output_fw, output_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, feats2d, \n sequence_length=seq_lengths, \n dtype=tf.float32)\n\n # TODO: make predictions after every 64 time slices\n\n concat_rnn = tf.concat([state_fw[0], state_bw[0]], axis=1)\n\n if is_training:\n first_dropout = tf.nn.dropout(concat_rnn, dropout_prob)\n else:\n first_dropout = concat_rnn\n\n # Second LSTM \n # TODO\n\n # Logits Layer\n num_classes = model_settings['num_classes']\n logits = tf.layers.dense(inputs=first_dropout, units=num_classes)\n \n if is_training:\n return logits, dropout_prob\n else:\n return logits", "def TestLSTM(test_x, test_y): \r\n loss = 0.0\r\n seq_length = test_y.shape[1]\r\n for t in range(seq_length):\r\n lstm_in = StepProcess(test_x, batch_size, source_length, lstm_step)\r\n logit = lstm_restored(lstm_in)\r\n # loss function : RSME TODO\r\n loss_0 = tf.keras.losses.MSE(test_y[:, t, 1:3], logit[:, 1:3])\r\n loss += tf.sqrt(loss_0)# TODO\r\n pred_point = np.reshape(logit.numpy(), [batch_size, 1, 5])\r\n test_x = np.concatenate((test_x[:, 1:source_length, :], pred_point), axis=1) \r\n \r\n loss = tf.reduce_mean(loss)\r\n loss = loss / seq_length\r\n return loss", "def build_lstm11(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9a'))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9a'))\n # model.add(GlobalMaxPool1D())\n # model.add(BatchNormalization())\n # model.add(Dropout(settings['dropout'] / 2.0))\n\n # model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9c'))\n model.add(GlobalMaxPool1D(name='mp9'))\n model.add(BatchNormalization(name='bn9'))\n model.add(Dropout(settings['dropout'] / 2.0, name='drop9b'))\n\n model.add(Dense(shape['n_class'], activation='sigmoid', name='den9b'))\n xprint('build_lstm9: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model", "def test_influencers_lstm():\n mock_input = Variable(torch.randn(9, 1, Artist.component_size))\n # 9 mixture components, e.g. 
3 influencers, each with 3 components\n # 1 batch size (num artists), batching will occur at image level (GAN model)\n # Artist.comp_emb_size: emb_size of one component of Artist\n\n emb_size = 16\n hidden_size = 32\n n_layers = 1\n influencers_lstm = InfluencersLSTM(emb_size, hidden_size, n_layers)\n influencers_emb = influencers_lstm(mock_input)\n\n print influencers_emb", "def define_nmt(hidden_size, batch_size, en_timesteps, en_vsize, fr_timesteps, fr_vsize):\n\n # Define an input sequence and process it.\n if batch_size:\n encoder_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inputs')\n decoder_inputs = Input(batch_shape=(batch_size, fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n encoder_inputs = Input(shape=(en_timesteps, en_vsize), name='encoder_inputs')\n if fr_timesteps:\n decoder_inputs = Input(shape=(fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n decoder_inputs = Input(shape=(None, fr_vsize), name='decoder_inputs')\n\n # Encoder GRU\n encoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru')\n encoder_out, encoder_state = encoder_gru(encoder_inputs)\n\n # Set up the decoder GRU, using `encoder_states` as initial state.\n decoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='decoder_gru')\n decoder_out, decoder_state = decoder_gru(decoder_inputs, initial_state=encoder_state)\n\n # Attention layer\n attn_layer = AttentionLayer(name='attention_layer')\n attn_out, attn_states = attn_layer([encoder_out, decoder_out])\n\n # Concat attention input and decoder GRU output\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n\n # Dense layer\n dense = Dense(fr_vsize, activation='softmax', name='softmax_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Full model\n full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)\n full_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics = ['accuracy'])\n\n full_model.summary()\n\n \"\"\" Inference model \"\"\"\n batch_size = 1\n\n \"\"\" Encoder (Inference) model \"\"\"\n encoder_inf_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inf_inputs')\n encoder_inf_out, encoder_inf_state = encoder_gru(encoder_inf_inputs)\n encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n \"\"\" Decoder (Inference) model \"\"\"\n decoder_inf_inputs = Input(batch_shape=(batch_size, 1, fr_vsize), name='decoder_word_inputs')\n encoder_inf_states = Input(batch_shape=(batch_size, en_timesteps, hidden_size), name='encoder_inf_states')\n decoder_init_state = Input(batch_shape=(batch_size, hidden_size), name='decoder_init')\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = Model(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return full_model, encoder_model, decoder_model", "def BuildLSTMLayer(batch_size, seq_length, num_inputs, num_nodes):\n weights = RandomVar(\n LSTMCellWeightsShape(num_inputs, num_nodes), 
name='weights')\n m = array_ops.zeros([batch_size, num_nodes], name='init_m')\n c = array_ops.zeros([batch_size, num_nodes], name='init_c')\n x_seq, pad_seq = RandomInputs(batch_size, seq_length, num_inputs)\n\n out_seq = LSTMLayer('lstm', weights, m, c, x_seq, pad_seq)\n return out_seq, [weights]", "def define_nmt(hidden_size, batch_size, en_timesteps, en_vsize, fr_timesteps, fr_vsize):\n\n logger.debug(\"Defining Inputs\")\n # Define an input sequence and process it.\n if batch_size:\n encoder_inputs = tf.keras.layers.Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inputs')\n decoder_inputs = tf.keras.layers.Input(batch_shape=(batch_size, fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n encoder_inputs = tf.keras.layers.Input(shape=(en_timesteps, en_vsize), name='encoder_inputs')\n if fr_timesteps:\n decoder_inputs = tf.keras.layers.Input(shape=(fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n decoder_inputs = tf.keras.layers.Input(shape=(None, fr_vsize), name='decoder_inputs')\n\n logger.debug(\"Defining the sequential models\")\n\n # Encoder GRU\n encoder_gru = tf.keras.layers.GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru')\n encoder_out, encoder_state = encoder_gru(encoder_inputs)\n\n # Set up the decoder GRU, using `encoder_states` as initial state.\n decoder_gru = tf.keras.layers.GRU(hidden_size, return_sequences=True, return_state=True, name='decoder_gru')\n decoder_out, decoder_state = decoder_gru(decoder_inputs, initial_state=encoder_state)\n\n logger.debug(\"Defining the attention layer\")\n # Attention layer\n attn_layer = AttentionLayer(name='attention_layer')\n attn_out, attn_states = attn_layer([encoder_out, decoder_out])\n\n # Concat attention input and decoder GRU output\n decoder_concat_input = tf.keras.layers.Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n\n logger.debug(\"Defining the dense layers\")\n # Dense layer\n dense = tf.keras.layers.Dense(fr_vsize, activation='softmax', name='softmax_layer')\n dense_time = tf.keras.layers.TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n logger.debug(\"Defining the full model\")\n # Full model\n full_model = tf.keras.models.Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)\n full_model.compile(optimizer='adam', loss='categorical_crossentropy')\n\n full_model.summary()\n\n \"\"\" Inference model \"\"\"\n batch_size = 1\n\n logger.debug(\"Defining the inference model\")\n\n \"\"\" Encoder (Inference) model \"\"\"\n encoder_inf_inputs = tf.keras.layers.Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inf_inputs')\n encoder_inf_out, encoder_inf_state = encoder_gru(encoder_inf_inputs)\n encoder_model = tf.keras.models.Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n \"\"\" Decoder (Inference) model \"\"\"\n decoder_inf_inputs = tf.keras.layers.Input(batch_shape=(batch_size, 1, fr_vsize), name='decoder_word_inputs')\n encoder_inf_states = tf.keras.layers.Input(batch_shape=(batch_size, en_timesteps, hidden_size), name='encoder_inf_states')\n decoder_init_state = tf.keras.layers.Input(batch_shape=(batch_size, hidden_size), name='decoder_init')\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = tf.keras.layers.Concatenate(axis=-1, 
name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = tf.keras.layers.TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = tf.keras.models.Model(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return full_model, encoder_model, decoder_model", "def train_nn_sequence(sess, epochs, nn_last_layer, hidden_state, carry_state, batch_size, data_loader, accuracy_op, train_op, loss_function, input_tensor,\n truth_tensor, initial_hidden_state, initial_carry_state, learning_rate, base_learning_rate,\n learning_decay_rate, learning_decay_factor):\n #initialize variables\n sess.run(tf.global_variables_initializer())\n \n print(\"Training...\")\n print()\n scaling_rate = 1\n \n loss_output = 0\n for i in range(epochs):\n loss_output = 0\n print(\"EPOCH {} ...\".format(i+1))\n if i%learning_decay_rate == 0 and i != 0:\n scaling_rate = learning_decay_factor * scaling_rate\n j = 0\n sum_accuracy = 0\n sum_loss = 0\n for image, output, batch_i_size in data_loader.get_train_batches_fn_timeseries_sequence(batch_size):\n initial_state_value = np.zeros(shape=(batch_i_size, 29, 39, 20), dtype=float)\n \n nn_output, lstm_hidden_state, lstm_carry_state, optimizer, loss = sess.run([nn_last_layer, hidden_state, carry_state, train_op, loss_function], \n feed_dict={input_tensor: image, truth_tensor: output, initial_hidden_state: initial_state_value, initial_carry_state: initial_state_value, learning_rate: scaling_rate*base_learning_rate})\n \n \n #print(np.shape(lstm_hidden_state))\n #print(np.shape(lstm_carry_state))\n \n accuracy, loss_output = sess.run([accuracy_op, loss_function], feed_dict={input_tensor: image, truth_tensor: output, initial_hidden_state: initial_state_value, initial_carry_state: initial_state_value})\n \n #print(np.shape(loss_output))\n sum_accuracy = sum_accuracy + accuracy\n sum_loss = sum_loss + loss_output\n j = j+1\n\n valid_x, valid_y, valid_size = data_loader.get_validation_data_sequence()\n initial_state_value = np.zeros(shape=(valid_size, 29, 39, 20), dtype=float)\n valid_accuracy = sess.run([accuracy_op],\n feed_dict={input_tensor: valid_x, truth_tensor: valid_y, initial_hidden_state: initial_state_value, initial_carry_state: initial_state_value})\n print(\"Loss {} ...\".format(sum_loss/j))\n print(\"Train Accuracy {} ...\".format(sum_accuracy/j))\n print(\"Validation Accuracy {} ...\".format(valid_accuracy))", "def test_word_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-pytorch\"},\n \"params\": {\"emb_dim\": 30},\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"add_terminals\": \"True\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n 
model.fit(examples, labels)\n model_predictions_assertions(model)", "def run_LSTM(data):\n # Initialising the RNN\n model = Sequential()\n \"\"\"\n # Adding the first LSTM layer and some Dropout regularisation\n model.add(LSTM(units=256,return_sequences=False, input_shape=(data.x_train.shape[1], data.x_train.shape[2])))\n model.add(Dropout(0.3))\n model.add(Dense(units=1))\n \"\"\"\n\n model = Sequential()\n model.add(LSTM(units=180, return_sequences=False, input_shape=(data.x_train.shape[1], data.x_train.shape[2])))\n #model.add(Dropout(params['dropout']))\n #model.add(LSTM(units=128))\n #model.add(Dropout(params['dropout']))\n #model.add(Dense(units=64))\n model.add(Dense(units=1,activation='relu',kernel_initializer=tf.keras.initializers.Orthogonal()))\n # Compiling the RNN\n opt = Adam(lr=0.0052)\n model.compile(optimizer=opt, loss='mean_squared_error',metrics=['mean_absolute_percentage_error'])\n\n # Fitting the RNN to the Training set\n regressor = model.fit(data.x_train, data.y_train.ravel(), epochs=180,batch_size=410,shuffle=True,validation_data=(data.x_valid,data.y_valid.ravel()))\n\n #Create plots\n plt.plot(regressor.history['loss'], label='loss')\n plt.plot(regressor.history['val_loss'], label='val_loss')\n plt.legend()\n plt.show()\n\n\n plt.figure()\n outputs = model.predict(data.x_test)\n print(outputs.shape)\n predictions = outputs[:,0]\n\n print(predictions.shape)\n\n pred_prices = predictions.reshape(-1,1)\n real_prices = data.y_test.reshape(-1,1)\n mape = 0\n\n pred_prices = data.inv.inverse_transform(pred_prices)\n real_prices = data.inv.inverse_transform(real_prices)\n\n #if Returns == False:\n \n #mape = mean_absolute_percentage_error(real_prices, pred_prices)\n\n #pred_prices = [x * (data.mm[1] - data.mm[0]) + data.mm[0] for x in predictions.reshape(-1)]\n #real_prices = [x * (data.mm[1] - data.mm[0]) + data.mm[0] for x in data.y_test.reshape(-1)]\n\n #pred_prices = data.train_sc.inverse_transform(predictions.reshape(-1))\n #real_prices = data.test_sc.inverse_transform(data.y_test.reshape(-1))\n\n #mape = mean_absolute_percentage_error(data.y_test.ravel(), pred_prices.)\n y_true, y_pred = np.array(real_prices).reshape(-1,1), np.array(pred_prices).reshape(-1,1)\n #y_true, y_pred = y_true[:50], y_pred[:50]\n\n mape = mean_absolute_percentage_error(y_true, y_pred)\n pct = PCT(y_true,y_pred)\n mse = mean_squared_error(y_true,y_pred)\n rmse = sqrt(mse)\n amape = AMAPE(y_true,y_pred)\n mae = MAE(y_true,y_pred)\n\n plt.plot(real_prices, label='targets')\n plt.plot(pred_prices, label='predictions')\n plt.legend()\n plt.title('LSTM test data')\n plt.show()\n\n plt.figure()\n outputs = model.predict(data.x_train)\n print(outputs.shape)\n predictions = outputs[:,0]\n\n plt.plot(data.y_train.ravel(), label='targets')\n plt.plot(predictions, label='predictions')\n plt.legend()\n plt.title('LSTM train data')\n plt.show()\n print(y_pred)\n\n print('RMSE= {:.6f}, MAPE = {:.6f}, PCT = {:.6f}, MSE = {:.6f}, MAE = {:.6f}, AMAPE = {:.6f}'.format(rmse, mape, pct, mse, mae, amape))", "def test_deepconvlstm_starts_with_batchnorm(self):\n model = modelgen.generate_DeepConvLSTM_model(\n (None, 20, 3), 2, [32, 32], [32, 32])\n assert str(type(model.layers[0])) \\\n == \"<class 'keras.layers.normalization.BatchNormalization'>\", \\\n 'Wrong layer type.'", "def test_char_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-pytorch\"},\n \"params\": 
{ # default token_spans_pooling_type is \"first\"\n \"embedder_type\": \"glove\", \"emb_dim\": 30, \"tokenizer_type\": \"char-tokenizer\"},\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n with pytest.raises(ValueError):\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n\n config = {**config, \"params\": {\n \"embedder_type\": None, \"emb_dim\": 30, \"tokenizer_type\": \"char-tokenizer\"}\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"add_terminals\": \"True\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def model_RNN(x_train, y_train, x_test=None, y_test=None, kwargs={}):\n \"\"\"\n Notes on Input shape\n 3D tensor with shape (batch_size, timesteps, input_dim).\n https://keras.io/layers/recurrent/\n LSTMs in Keras are typically used on 3d data (batch dimension, timesteps, features).\n LSTM without return_sequences will output (batch dimension, output features)\n LSTM with return_sequences will output (batch dimension, timesteps, output features)\n Basic timeseries data has an input shape (number of sequences, steps, features). Target is (number of sequences, steps, targets). Use an LSTM with return_sequences.\n \"\"\"\n ######## RELU??? DropOut\n # create and fit the LSTM network\n # input_shape = Lookback x Features\n verbose = kwargs.get('verbose',False)\n layers = kwargs.get('layers', 1 )\n nodes = kwargs.get('nodes', None)\n\n if nodes is None or nodes==0 or nodes==[0]:\n nodes = [np.shape(x_train)[1]]\n elif isinstance(nodes, (int, np.integer)): # turn int to list\n nodes = [nodes]\n\n if layers > 1 and len(nodes) < layers:\n nodes = list(np.pad(nodes,[0,layers-len(nodes)], mode='constant',constant_values=nodes[-1]))\n\n ndim = np.max([2,len(np.shape(x_train))]) # Min 2D\n if ndim==2:\n input_shape=(x_train.shape[1],)\n else:\n input_shape=(x_train.shape[1],x_train.shape[2])\n if kwargs.get('learning_rate', False):\n lr = kwargs.get('learning_rate')\n else:\n lr = False\n\n if np.ndim(y_train)==1:\n n_out = 1 #e.g. forecast y as float, just 1 step ahead.\n else:\n n_out = np.shape(y_train)[1] #e.g. 
onehot encoded, or n-steps ahead.\n\n dropout = kwargs.get('dropout',0) # dropout rate between 0 and 1.\n stateful = kwargs.get('stateful',True)\n if stateful: #RNN needs fixed batch - consider using static_index\n batch_shape = (kwargs.get('batch_size',1234),) + input_shape\n actvn = kwargs.get('actvn','tanh')\n actvl = kwargs.get('actvl','sigmoid')\n if verbose and not actvn == 'tanh': print('tanh activation recommended for LSTM but you are using',actvn)\n\n model=[]\n model = Sequential() # https://keras.io/models/sequential/\n model.reset_states() # ?useful for batch training RNN... perhaps inside batched loop\n #TODO? model.add(Embedding(max_features, output_dim=n_out))\n\n if layers>1:\n for n in range(1,layers):\n if kwargs.get('verbose'): print('+adding extra layer')\n if stateful: #switch between batch_ and input_shape\n model.add(LSTM(nodes[layers-1], batch_input_shape=batch_shape, return_sequences=True, activation=actvn, stateful=stateful))\n else:\n model.add(LSTM(nodes[layers-1], input_shape=input_shape, return_sequences=True, activation=actvn, stateful=stateful))\n if kwargs.get('bnorm', False):\n model.add(keras.layers.normalization.BatchNormalization())\n # TODO find out about time lock dropout\n if dropout:\n model.add(Dropout(dropout)) #(c.f. Regularisation of Betas)\n\n # Single layer or last layer of RNN\n if stateful:\n model.add(LSTM(nodes[layers-1], batch_input_shape=batch_shape, return_sequences=False, activation=actvn, stateful=stateful))\n else:\n model.add(LSTM(nodes[layers-1], input_shape=input_shape, return_sequences=False, activation=actvn, stateful=stateful))\n\n #model.add(Flatten()) # Req'd if last layer return_sequences=True\n #model.add(Dense(nodes[layers-1]**2, activation=actvl))\n model.add(Dense(n_out, activation=actvl))\n\n #defaults = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)\n if hasattr(kwargs,'optimizer'):\n optimizer = kwargs['optimizer']\n elif lr:\n optimizer = keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8, schedule_decay=0.004)\n else:\n optimizer = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-8,schedule_decay=0.004)\n optimizer = 'adam'\n\n if is_bool_dtype(y_train):\n model.compile(loss='binary_crossentropy', optimizer=optimizer)\n if is_categorical_dtype(y_train) or kwargs.get('onehot',False):\n #TODO Multiple Category\n model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n else:\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n\n if verbose > 1:\n model.summary()\n print(\"Inputs: {}\".format(model.input_shape))\n print(\"Outputs: {}\".format(model.output_shape))\n print(\"Actual input: {}\".format(x_train.shape))\n print(\"Actual output: {}\".format(y_train.shape))\n print('Model Loss: ' + model.loss)\n\n # For compatability with other models;\n model.score = model.evaluate\n\n return model #self.model=model", "def lstm_classifier(**kwargs):\n input_vector_size = kwargs.get('input_vector_size', 128)\n dense_size = kwargs.get('dense_size', 20)\n output = kwargs.get('label_size', 2)\n timesteps = 1\n xav_init = tf.contrib.layers.xavier_initializer()\n adam = optimizers.Adam(lr=0.01)\n sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)\n ##########\n\n model = Sequential()\n model.add(CuDNNLSTM(64))\n model.add(Dense(20, activation='softmax', \n kernel_initializer='glorot_normal',\n activity_regularizer=regularizers.l2(0.001)))\n model.add(Dropout(0.2))\n model.add(Dense(20, 
activation='softmax', \n kernel_initializer='glorot_normal',\n activity_regularizer=regularizers.l2(0.001)))\n model.add(Dropout(0.2))\n model.add(Dense(2, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n \n return model", "def lrcn(self):\n model = Sequential()\n\n model.add(TimeDistributed(Conv2D(32, (7, 7), strides=(2, 2),\n activation='relu', padding='same'), input_shape=self.input_shape))\n model.add(TimeDistributed(Conv2D(32, (3,3),\n kernel_initializer=\"he_normal\", activation='relu')))\n model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))\n\n model.add(TimeDistributed(Conv2D(64, (3,3),\n padding='same', activation='relu')))\n model.add(TimeDistributed(Conv2D(64, (3,3),\n padding='same', activation='relu')))\n model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))\n\n model.add(TimeDistributed(Conv2D(128, (3,3),\n padding='same', activation='relu')))\n model.add(TimeDistributed(Conv2D(128, (3,3),\n padding='same', activation='relu')))\n model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))\n\n model.add(TimeDistributed(Conv2D(256, (3,3),\n padding='same', activation='relu')))\n model.add(TimeDistributed(Conv2D(256, (3,3),\n padding='same', activation='relu')))\n model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))\n\n model.add(TimeDistributed(Conv2D(512, (3,3),\n padding='same', activation='relu')))\n model.add(TimeDistributed(Conv2D(512, (3,3),\n padding='same', activation='relu')))\n model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))\n\n model.add(TimeDistributed(Flatten()))\n\n model.add(Dropout(0.5))\n #model.add(LSTM(256, return_sequences=False, dropout=0.5))\n model.add(LSTM(4096, return_sequences=False, dropout=0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n\n return model", "def makemod(LSTM_layers, LSTM_sizes, Dense_layers, text_designation, vocab_size, x_train, y_train, val_size=0.1,\n num_epochs=25, batch_size=False, loss_type=\"categorical_crossentropy\", opt=\"adam\"):\n if not batch_size:\n batch_size = \"No\"\n for lstmlayer in LSTM_layers:\n for lstmsize in LSTM_sizes:\n for denselayer in Dense_layers:\n NAME = f\"{text_designation}-model, {lstmlayer} layer(s) of {lstmsize} LSTM Nodes, \" \\\n f\"{denselayer} Dense, {num_epochs} Ep, {batch_size} Bat, \" \\\n f\"{val_size*100}% Val\"\n model = Sequential()\n for l in range(lstmlayer - 1):\n model.add(LSTM(lstmsize, return_sequences=True, input_shape=(x_train.shape[1], x_train.shape[2])))\n model.add(LSTM(lstmsize, input_shape=(x_train.shape[1], x_train.shape[2])))\n for l in range(denselayer):\n model.add(Dense(vocab_size, activation='relu'))\n model.add(Dense(vocab_size, activation='softmax'))\n print(model.summary())\n # Log the model\n tb = TensorBoard(log_dir=f\"logs\\logs\\{NAME}\")\n # Compile model\n model.compile(loss=loss_type, optimizer=opt, metrics=[\"accuracy\"])\n es = EarlyStopping(monitor='val_loss', patience=10, verbose=1, restore_best_weights=True)\n model.fit(x_train, y_train, epochs=num_epochs, batch_size=100, validation_split=val_size, shuffle=True,\n verbose=2, callbacks=[tb, es])\n print(\"Model {} created\".format(NAME))\n # Save Model\n model.save(f\"models\\models\\{NAME}\")\n print(\"Model {} saved\".format(NAME))", "def model_create_lstm(input_dim, output_dim, n_features, n_houses, x_train, y_train, x_test, y_test, early=None):\r\n model = Sequential()\r\n for _ in range(nn_hparams['num_layers']):\r\n model.add(LSTM(nn_hparams['units'], 
activation=nn_hparams['activation'], input_shape=(input_dim,n_features), return_sequences=True))\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Flatten())\r\n model.add(Dense(y_train.shape[1]*y_train.shape[2]))\r\n custom_optimizer = getattr(optimizers, nn_hparams['optimizer'])(lr=nn_hparams['learning_rate'], beta_1=nn_hparams['beta_1'], beta_2=nn_hparams['beta_2'])\r\n model.compile(optimizer=custom_optimizer, loss=nn_hparams['loss'])\r\n y_train = y_train.reshape((y_train.shape[0], y_train.shape[1]*y_train.shape[2]))\r\n y_test = y_test.reshape((y_test.shape[0], y_test.shape[1]*y_test.shape[2]))\r\n if early:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1, callbacks=[early])\r\n else:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1)\r\n model_loss = model.evaluate(x_train, y_train, verbose=0)\r\n \r\n return model, model_loss", "def train():\n if os.path.isfile(load_model):\n all_weights = np.load(load_model) \n else:\n print(\"Model file does not exist. Exiting....\")\n return\n\n print(\"Build up the network\")\n\n\n # Two different types of input\n image_input_var = T.tensor4('original_inputs')\n rotated_image_input_var = T.tensor4('rotated_image_input')\n target_var = T.ivector('targets')\n\n # Build teacher network\n cnn_model, cnn_mid_output, weight_decay_penalty = cifar10_merge.build_cnn(image_input_var)\n\n # Get the intermediate layer of the teacher network\n original_model_mid_output = lasagne.layers.get_output(cnn_mid_output, image_input_var, deterministic = True)\n\n # Get the softmax output of the teacher network.\n\n original_model_output_val = lasagne.layers.get_output(cnn_model, image_input_var, deterministic = True)\n \n # Build the student network\n \n rotated_cnn_model, rotated_model_mid, rotated_weight_penalty = \\\n cifar10_merge.build_cnn(rotated_image_input_var)\n \n # Get the softmax output of the student network. 
Since it need to be trained on, deterministic = False\n rotated_model_mid_output = lasagne.layers.get_output(rotated_model_mid, rotated_image_input_var, deterministic = False)\n\n # Get the model output of the studenet network.\n rotated_model_output = lasagne.layers.get_output(rotated_cnn_model, rotated_image_input_var, deterministic = True)\n\n # Set the weights for the teacher network\n lasagne.layers.set_all_param_values(cnn_model, all_weights)\n\n # Get the initialized weights below the intermediate layer\n rotated_net_weights_below_mid = lasagne.layers.get_all_param_values(rotated_model_mid)\n\n # Get the parameter of the student network that needs to be trained.\n rotated_net_training_param = lasagne.layers.get_all_params(rotated_model_mid, trainable=True)\n\n # Set the weights for the student network\n lasagne.layers.set_all_param_values(rotated_cnn_model, all_weights)\n\n lasagne.layers.set_all_param_values(rotated_model_mid,\n rotated_net_weights_below_mid)\n \n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(rotated_model_mid_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # L = T.mean(lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output), axis = 1)\n L = lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output).mean()\n # cost = T.mean(L)\n\n # cost = cross_entropy_loss_mean\n cost = L\n\n # updates = lasagne.updates.adagrad(cost, rotated_net_training_param, learning_rate=0.1)\n updates = lasagne.updates.adam(cost, rotated_net_training_param, learning_rate=0.001)\n\n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(model_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # loss = cross_entropy_loss_mean + weight_decay_penalty\n\n\n train_acc = T.mean(T.eq(T.argmax(rotated_model_output, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n original_model_acc = T.mean(T.eq(T.argmax(original_model_output_val, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n train_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_mid_output, rotated_model_mid_output, train_acc], updates = updates)\n\n # Return the accuracy for teacher network and student network, respectively\n val_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_acc, train_acc])\n\n if os.path.isfile(os.path.join(train_dir, 'latest_model.txt')):\n weight_file = \"\"\n with open(os.path.join(train_dir, 'latest_model.txt'), 'r') as checkpoint_file:\n weight_file = checkpoint_file.read().replace('\\n', '')\n print(\"Loading from: \", weight_file)\n model_weights = np.load(weight_file)\n lasagne.layers.set_all_param_values(rotated_cnn_model, model_weights)\n\n # Get images and labels for CIFAR-10.\n\n cifar10_data = cifar10_merge_input.load_cifar10()\n\n bkgimg = np.array([np.mean(cifar10_data.train.images[cifar10_data.train.labels==i], axis = 0) for i in range(10)])\n for epoch in xrange(max_steps):\n start_time = time.time()\n\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n total_t_net_for_original = 0\n total_s_net_for_original = 0\n total_t_net_for_rotation = 0\n total_s_net_for_rotation = 0\n total_count = 0\n\n print(\"Start Evaluating\")\n\n while(rotated_test_image is not None):\n t_net_for_original, s_net_for_original = val_fn(original_test_image, original_test_image, 
test_label)\n total_t_net_for_original += t_net_for_original * original_test_image.shape[0]\n total_s_net_for_original += s_net_for_original * original_test_image.shape[0]\n\n t_net_for_rotated, s_net_for_rotated = val_fn(rotated_test_image, rotated_test_image, test_label)\n total_t_net_for_rotation += t_net_for_rotated * rotated_test_image.shape[0]\n total_s_net_for_rotation += s_net_for_rotated * rotated_test_image.shape[0]\n\n total_count += rotated_test_image.shape[0]\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n \n print(\"Student Network Accuracy on Original Image: %.4f\" % (float(total_s_net_for_original / total_count)))\n print(\"Teacher Network Accuracy on Original Image: %.4f\" % (float(total_t_net_for_original / total_count)))\n\n print(\"Student Network Accuracy on Rotated Image: %.4f\" % (float(total_s_net_for_rotation / total_count)))\n print(\"Teacher Network Accuracy on Rotated Image: %.4f\" % (float(total_t_net_for_rotation / total_count)))\n\n\n print(\"Start Training...\")\n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n # rotated_train_image = random_rotated_image(original_train_image[::-1])\n rotated_train_image = random_rotated_image(original_train_image)\n\n end_time_1 = time.time() - start_time\n step = 1\n loss_total = 0\n original_start = start\n\n while(start != 0):\n #loss_value, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n \n ori_mid, rot_mid, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n # ori_mid, rot_mid, train_acc = train_fn(original_train_image, np.array(np.random.rand(batch_size, 3, 32, 32), dtype = np.float32), train_label)\n step += 1\n if start == original_start:\n print(ori_mid[0])\n print(rot_mid[0])\n print(train_label)\n \n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n rotated_train_image = random_rotated_image(original_train_image)\n # assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n # loss_total += loss_value\n if 1:\n if epoch % 100 == 0 or (step + 1) == max_steps:\n checkpoint_path = os.path.join(train_dir, 'model_step%d.npy' % epoch)\n weightsOfParams = lasagne.layers.get_all_param_values(rotated_cnn_model)\n np.save(checkpoint_path, weightsOfParams)\n latest_model_path = os.path.join(train_dir, 'latest_model.txt')\n try:\n os.remove(latest_model_path)\n except OSError:\n pass\n latest_model_file = open(latest_model_path, \"w\")\n latest_model_file.write(checkpoint_path)\n latest_model_file.close()\n\n # print(\"Epoch Stop, loss_averge\", float(loss_total) / float(step))\n duration = time.time() - start_time\n print(\"Duration is\", duration)", "def test_rnnslu(**kwargs):\n # process input arguments\n param = {\n 'fold': 3,\n 'lr': 0.1,\n 'verbose': True,\n 'decay': False,\n 'win': 3,\n 'nhidden': 300,\n 'seed': 345,\n 'emb_dimension': 50,\n 'nepochs': 60,\n 'normal': False,\n 'folder':'../result',\n 'longdependence':None,\n 'optimization':'Adagrad'\n }\n param_diff = set(kwargs.keys()) - set(param.keys())\n if param_diff:\n raise KeyError(\"invalid arguments:\" + str(tuple(param_diff)))\n param.update(kwargs)\n\n if param['verbose']:\n for k,v in param.items():\n print(\"%s: 
%s\" % (k,v))\n\n # create result folder if not exists\n check_dir(param['folder'])\n\n # load the dataset\n print('... loading the dataset')\n train_set, valid_set, test_set, dic = load_data(param['fold'])\n\n # create mapping from index to label, and index to word\n idx2label = dict((k, v) for v, k in dic['labels2idx'].items()) # change label2index - index2label\n idx2word = dict((k, v) for v, k in dic['words2idx'].items()) # change words2index - index2words\n\n # unpack dataset\n train_lex, train_ne, train_y = train_set\n valid_lex, valid_ne, valid_y = valid_set\n test_lex, test_ne, test_y = test_set \n\n train_lex = train_lex + test_lex\n train_y = train_y + test_y\n train_ne = train_ne + test_ne\n\n vocsize = len(dic['words2idx']) # # of words\n nclasses = len(dic['labels2idx']) # # of classes \n nsentences = len(train_lex) # # training sample [a batch is all the words in a sentence]\n\n ## get the label for (input,output) for test and valid set \n groundtruth_valid = [map(lambda x: idx2label[x], y) for y in valid_y]\n words_valid = [map(lambda x: idx2word[x], w) for w in valid_lex]\n\n # instanciate the model\n numpy.random.seed(param['seed'])\n random.seed(param['seed'])\n \n\n print('... building the model')\n lstm = LSTM(\n nh=param['nhidden'],\n nc=nclasses,\n ne=vocsize,\n de=param['emb_dimension'],\n cs=param['win'],\n normal=param['normal'],\n longdependence = param['longdependence'],\n optimization = param['optimization']\n )\n\n ## build the model for mini-batch\n # train with early stopping on validation set\n print('... training')\n best_f1 = -numpy.inf\n param['clr'] = param['lr']\n \n for epoch in range(param['nepochs']):\n\n param['ce'] = epoch\n tic = timeit.default_timer()\n print('epoch %i out of %i' %(epoch,param['nepochs']) )\n \n for i, (x, y) in enumerate(zip(train_lex, train_y)):\n input_length = len(x)\n lstm.train(x, y, param['win'], param['clr'])\n print('[learning] epoch %i >> %2.2f%%' % (\n epoch, (i + 1) * 100. 
/ nsentences), end=' ')\n print('completed in %.2f (sec) <<\\r' % (timeit.default_timer() - tic), end='')\n\n # evaluation // back into the real world : idx -> words\n predictions_valid = [map(lambda x: idx2label[x],\n lstm.classify(numpy.asarray(\n contextwin(x, param['win'])).astype('int32')))\n for x in valid_lex]\n\n # evaluation // compute the accuracy using conlleval.pl\n res_valid = conlleval(predictions_valid,\n groundtruth_valid,\n words_valid,\n param['folder'] + '/current.valid.txt',\n param['folder'])\n\n if res_valid['f1'] > best_f1:\n\n best_f1 = res_valid['f1']\n\n if param['verbose']:\n print('NEW BEST: epoch', epoch,\n 'best test F1', res_valid['f1'])\n\n param['tf1'] = res_valid['f1']\n param['tp'] = res_valid['p']\n param['tr'] = res_valid['r']\n param['be'] = epoch\n else:\n if param['verbose']:\n print('')\n\n # learning rate decay if no improvement in 10 epochs\n if param['decay'] and abs(param['be']-param['ce']) >= 10:\n param['clr'] *= 0.5\n\n if param['clr'] < 1e-5:\n break\n \n\n print('BEST RESULT: epoch', param['be'],\n 'best test F1', param['tf1'],\n 'with the model', param['folder'])\n \n return lstm", "def test_lstm_no_bias(self):\n\n class SimpleNoBiasLSTM(nn.Module):\n def __init__(self):\n super(SimpleNoBiasLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(5, 10, 1, bias=False)\n w2 = torch.randn(40, 10)\n w1 = torch.randn(40, 5)\n self.rnn.training = False\n self.rnn.weight_ih_l0 = torch.nn.Parameter(w1)\n self.rnn.weight_hh_l0 = torch.nn.Parameter(w2)\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(10, 3, 5)\n h = torch.randn(1, 3, 10)\n c = torch.randn(1, 3, 10)\n model = SimpleNoBiasLSTM()\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.5, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.tensor4()\n y_batch = T.tensor4()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n print(num_layers)\n\n code_layer = layers[num_layers/2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n # validation cost\n valid_output = lasagne.layers.get_output(network, X_batch)\n valid_cost = lasagne.objectives.mse(valid_output, y_batch) \n valid_cost = valid_cost.mean() \n\n # test the performance of the netowork without noise\n pred = lasagne.layers.get_output(network, X_batch, deterministic=True)\n # pred = T.argmax(test, axis=1)\n accuracy = 1 - T.mean(lasagne.objectives.mse(pred, y_batch), dtype=theano.config.floatX)\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n valid = theano.function(inputs=[X_batch, y_batch], outputs=valid_cost, allow_input_downcast=True)\n 
predict = theano.function(inputs=[X_batch], outputs=pred, allow_input_downcast=True)\n accuracy = theano.function(inputs=[X_batch,y_batch], outputs=accuracy, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n valid=valid,\n predict=predict,\n accuracy=accuracy,\n code=code\n )", "def test_bpe_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-pytorch\"},\n \"params\": { # default token_spans_pooling_type is \"first\"\n \"emb_dim\": 30, \"tokenizer_type\": \"bpe-tokenizer\", \"add_terminals\": True\n },\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"max\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean_sqrt\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"last\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {\n **config[\"params\"], \"use_crf_layer\": False, \"token_spans_pooling_type\": \"first\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def test_mstgcn():\n node_count = 307\n num_classes = 10\n edge_per_node = 15\n\n num_for_predict = 12\n len_input = 12\n nb_time_strides = 1\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n node_features = 2\n nb_block = 2\n K = 3\n nb_chev_filter = 64\n nb_time_filter = 64\n batch_size = 32\n\n model = MSTGCN(\n nb_block,\n node_features,\n K,\n nb_chev_filter,\n nb_time_filter,\n nb_time_strides,\n num_for_predict,\n len_input,\n ).to(device)\n T = len_input\n x_seq = torch.zeros([batch_size, node_count, node_features, T]).to(device)\n target_seq = torch.zeros([batch_size, node_count, T]).to(device)\n edge_index_seq = 
[]\n\n for b in range(batch_size):\n for t in range(T):\n x, edge_index = create_mock_data(node_count, edge_per_node, node_features)\n x_seq[b, :, :, t] = x.to(device)\n if b == 0:\n edge_index_seq.append(edge_index.to(device))\n target = create_mock_target(node_count, num_classes).to(device)\n target_seq[b, :, t] = target\n\n shuffle = True\n train_dataset = torch.utils.data.TensorDataset(x_seq, target_seq)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, shuffle=shuffle\n )\n\n for batch_data in train_loader:\n encoder_inputs, _ = batch_data\n outputs1 = model(encoder_inputs, edge_index_seq)\n outputs2 = model(encoder_inputs, edge_index_seq[0])\n\n assert outputs1.shape == (batch_size, node_count, num_for_predict)\n assert outputs2.shape == (batch_size, node_count, num_for_predict)", "def time_cnn():\n\n data_dir = \"/home/liyanzeng/git/Var-CNN--DynaFlow/preprocess\"\n\n # read in data from numpy files\n train_metadata = np.load(r\"%s/train_metadata.npy\" % data_dir)\n test_metadata = np.load(r\"%s/test_metadata.npy\" % data_dir)\n train_seq = np.load(r\"%s/train_seq.npy\" % data_dir)\n train_labels = np.load(r\"%s/train_labels.npy\" % data_dir)\n test_seq = np.load(r\"%s/test_seq.npy\" % data_dir)\n test_labels = np.load(r\"%s/test_labels.npy\" % data_dir)\n\n # apply normalization to metadata\n metadata_scaler = StandardScaler()\n train_metadata = metadata_scaler.fit_transform(train_metadata)\n test_metadata = metadata_scaler.transform(test_metadata)\n\n # extract sequences\n train_time, train_time_dleft, train_time_dright, train_dir = np.split(train_seq, 4, axis=2)\n test_time, test_time_dleft, test_time_dright, test_dir = np.split(test_seq, 4, axis=2)\n\n # reshape to be able to normalize\n train_time = np.reshape(train_time, (train_time.shape[0], train_time.shape[1]))\n test_time = np.reshape(test_time, (test_time.shape[0], test_time.shape[1]))\n train_time_dleft = np.reshape(train_time_dleft, (train_time_dleft.shape[0], train_time_dleft.shape[1]))\n test_time_dleft = np.reshape(test_time_dleft, (test_time_dleft.shape[0], test_time_dleft.shape[1]))\n train_time_dright = np.reshape(train_time_dright, (train_time_dright.shape[0], train_time_dright.shape[1]))\n test_time_dright = np.reshape(test_time_dright, (test_time_dright.shape[0], test_time_dright.shape[1]))\n\n # apply normalization to packet time data according to scaling computed on train timestamp data\n time_scaler = StandardScaler()\n train_time = time_scaler.fit_transform(train_time)\n test_time = time_scaler.transform(test_time)\n train_time_dleft = time_scaler.transform(train_time_dleft)\n test_time_dleft = time_scaler.transform(test_time_dleft)\n train_time_dright = time_scaler.transform(train_time_dright)\n test_time_dright = time_scaler.transform(test_time_dright)\n\n train_seq = np.stack((train_time, train_time_dleft, train_time_dright), axis=-1)\n test_seq = np.stack((test_time, test_time_dleft, test_time_dright), axis=-1)\n\n # construct CNN\n cnn_input = Input(shape=(seq_length, 3,), name='cnn_input')\n cnn_model = time_conv_block(cnn_input, 2, 4)\n cnn_model = time_conv_block(cnn_model, 2, 8)\n cnn_model = time_conv_block(cnn_model, 2, 8)\n cnn_model = time_conv_block(cnn_model, 3, 16)\n cnn_model = time_conv_block(cnn_model, 3, 16)\n cnn_output = Flatten()(cnn_model)\n cnn_output = dense_layer(cnn_output, 1024, 0.4)\n\n # construct MLP for metadata\n metadata_input = Input(shape=(7,), name='metadata_input')\n metadata_output = dense_layer(metadata_input, 32, 0.) 
# consider this the embedding of all the metadata\n\n # concatenate before second dense layer\n combined = Concatenate()([cnn_output, metadata_output])\n combined = dense_layer(combined, 1024, 0.5)\n\n # add final softmax layer\n if NUM_UNMON_SITES == 0: # closed-world\n combined_output = Dense(units=NUM_MON_SITES, activation='softmax', name='combined_output')(combined)\n else:\n # add extra class for unmonitored sites\n combined_output = Dense(units=NUM_MON_SITES + 1, activation='softmax', name='combined_output')(combined)\n\n model = Model(inputs=[cnn_input, metadata_input], outputs=[combined_output])\n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(0.001),\n metrics=['accuracy'])\n\n training_data = ({'cnn_input': train_seq,\n 'metadata_input': train_metadata},\n {'combined_output': train_labels})\n\n test_data = ({'cnn_input': test_seq,\n 'metadata_input': test_metadata},\n {'combined_output': test_labels})\n\n lr_modifier = LearningRateScheduler(schedule=lr_scheduler)\n\n # train model\n train_time_start = time.time()\n model.fit(x=training_data[0],\n y=training_data[1],\n batch_size=50,\n epochs=200,\n verbose=0,\n callbacks=[lr_modifier])\n train_time_end = time.time()\n\n # compute final softmax predictions on test set and save predictions\n test_time_start = time.time()\n predictions = model.predict(test_data[0], batch_size=50, verbose=0)\n test_time_end = time.time()\n \n save_dir = \"predictions\"\n np.save(file=r\"%s/time_model\" % save_dir, arr=predictions)\n \n return (train_time_end - train_time_start), (test_time_end - test_time_start)", "def test():\n args = parse_args()\n\n devid = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0\n context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=True, device_id=devid)\n\n # logger\n args.outputs_dir = os.path.join(args.log_path,\n datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))\n rank_id = int(os.environ.get('RANK_ID')) if os.environ.get('RANK_ID') else 0\n args.logger = get_logger(args.outputs_dir, rank_id)\n\n context.reset_auto_parallel_context()\n parallel_mode = ParallelMode.STAND_ALONE\n context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=1)\n\n args.logger.info('Creating Network....')\n network = SolveOutput(YOLOV3DarkNet53(is_training=False))\n\n data_root = args.data_root\n ann_file = args.annFile\n\n args.logger.info(args.pretrained)\n if os.path.isfile(args.pretrained):\n param_dict = load_checkpoint(args.pretrained)\n param_dict_new = {}\n for key, values in param_dict.items():\n if key.startswith('moments.'):\n continue\n elif key.startswith('yolo_network.'):\n param_dict_new[key[13:]] = values\n else:\n param_dict_new[key] = values\n load_param_into_net(network, param_dict_new)\n args.logger.info('load_model {} success'.format(args.pretrained))\n else:\n args.logger.info('{} not exists or not a pre-trained file'.format(args.pretrained))\n assert FileNotFoundError('{} not exists or not a pre-trained file'.format(args.pretrained))\n exit(1)\n\n config = ConfigYOLOV3DarkNet53()\n if args.testing_shape:\n config.test_img_shape = conver_testing_shape(args)\n\n ds, data_size = create_yolo_dataset(data_root, ann_file, is_training=False, batch_size=1,\n max_epoch=1, device_num=1, rank=rank_id, shuffle=False,\n config=config)\n\n args.logger.info('testing shape : {}'.format(config.test_img_shape))\n args.logger.info('totol {} images to eval'.format(data_size))\n\n network.set_train(False)\n # build attacker\n attack = 
DeepFool(network, num_classes=80, model_type='detection', reserve_ratio=0.9, bounds=(0, 1))\n input_shape = Tensor(tuple(config.test_img_shape), ms.float32)\n\n args.logger.info('Start inference....')\n batch_num = args.samples_num\n adv_example = []\n for i, data in enumerate(ds.create_dict_iterator(num_epochs=1)):\n if i >= batch_num:\n break\n image = data[\"image\"]\n image_shape = data[\"image_shape\"]\n\n gt_boxes, gt_logits = network(image, input_shape)\n gt_boxes, gt_logits = gt_boxes.asnumpy(), gt_logits.asnumpy()\n gt_labels = np.argmax(gt_logits, axis=2)\n\n adv_img = attack.generate((image.asnumpy(), image_shape.asnumpy()), (gt_boxes, gt_labels))\n adv_example.append(adv_img)\n np.save('adv_example.npy', adv_example)", "def _TestEnsemble(self, config):\n # Note that the initialization of the lattice must be the same across the\n # units dimension (otherwise the loss will be different).\n # We fix the random seed to make sure we get similar initialization.\n if self.disable_ensembles:\n return\n config = dict(config)\n config[\"num_training_epoch\"] = 3\n config[\"kernel_initializer\"] = \"constant\"\n losses = []\n for units, lattice_index in [(1, 0), (3, 0), (3, 2)]:\n config[\"units\"] = units\n config[\"lattice_index\"] = lattice_index\n tf.keras.utils.set_random_seed(42)\n losses.append(self._TrainModel(config))\n self.assertAlmostEqual(min(losses), max(losses), delta=self.loss_eps)", "def test_mlp(learning_rate=.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=150,\n dataset='mnist.pkl.gz', batch_size=20, n_hidden=100):\n #Note - transfer is used to check whether test_mlp is running for the first time with new weights or second time with transferred weights\n #Transfer is initialized to be false.\n #a transfer in the if statement will run the code for the Letters data set first and Numbers data set second.\n #(Not transfer) will run the code for the Numbers data set first and Letters data set second. \n\n #CHANGE FLAG - edit order datasets are run in and dataset name\n if(transfer):\n #datasets = load_data(dataset)\n f = open('HSFNums.p','rb')\n datasets = pickle.load(f)\n\n else:\n #datasets = getHSF()\n f = open('HSFLetters2.p','rb')\n datasets = pickle.load(f)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n f.close()\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\n #total size of valid data is printed\n print 'This is the vector size of the inputs' #\n print train_set_x.get_value(borrow=True).shape #\n print n_train_batches #\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\n\n #Data reduction\n if(transfer):\n train_set_x = train_set_x[0:int(1.0*n_train_batches*batch_size),:]\n train_set_y = train_set_y[0:int(1.0*n_train_batches*batch_size)]\n\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '... 
building the model'\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n x = T.matrix('x') # the data is presented as rasterized images\n y = T.ivector('y') # the labels are presented as 1D vector of\n # [int] labels\n\n rng = numpy.random.RandomState(1234)\n\n # construct the MLP class\n #problem is you can't pass weights through here, b/c of gradient descent\n #algorithms use these parameters\n\n #Numbers have 10 classifications, Letters have 26 classifications.\n #transfer is initialized as false, so depending on which dataset should be run first, edit this\n #CHANGE FLAG - edit the order the network trains in and the number of outputs (n_out)\n if(transfer):\n classifier = MLP(\n rng=rng,\n input=x,\n n_in=28 * 28,\n n_hidden=n_hidden,\n n_out=10\n )\n else:\n classifier = MLP(\n rng=rng,\n input=x,\n n_in=28 * 28,\n n_hidden=n_hidden,\n n_out=26\n )\n\n # the cost we minimize during training is the negative log likelihood of\n # the model plus the regularization terms (L1 and L2); cost is expressed\n # here symbolically\n cost = (\n classifier.negative_log_likelihood(y)\n + L1_reg * classifier.L1\n + L2_reg * classifier.L2_sqr\n )\n\n # compiling a Theano function that computes the mistakes that are made\n # by the model on a minibatch\n test_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: test_set_x[index * batch_size:(index + 1) * batch_size],\n y: test_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n\n validate_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: valid_set_x[index * batch_size:(index + 1) * batch_size],\n y: valid_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n\n # compute the gradient of cost with respect to theta (stored in params)\n # the resulting gradients will be stored in a list gparams\n gparams = [T.grad(cost, param) for param in classifier.params]\n\n # specify how to update the parameters of the model as a list of\n # (variable, update expression) pairs\n\n # given two lists of the same length, A = [a1, a2, a3, a4] and\n # B = [b1, b2, b3, b4], zip generates a list C of same size, where each\n # element is a pair formed from the two lists :\n # C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]\n updates = [\n (param, param - learning_rate * gparam)\n for param, gparam in zip(classifier.params, gparams)\n ]\n\n # compiling a Theano function `train_model` that returns the cost, but\n # in the same time updates the parameter of the model based on the rules\n # defined in `updates`\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #CHANGE FLAG - edit based on the order the network rusn in and the input file name\n inputSize=100 #number of input images sampled from next dataset for transfer calculations\n if(not transfer):\n #f2 = open('HSFLetters2.p','rb')\n #f2 can be changed based on whether letters should be transferred to numbers or v.c.\n f2 = open('HSFNums.p','rb')\n datasetsTransfer = pickle.load(f2)\n train_set_x2, train_set_y2 = datasetsTransfer[0]\n inputs=train_set_x2.get_value(borrow=True) #inputs\n f2.close()\n \n ###############\n # TRAIN MODEL #\n ###############\n print '... 
training'\n\n # early-stopping parameters\n patience = 10000 # look as this many examples regardless\n patience_increase = 2 # wait this much longer when a new best is\n # found\n improvement_threshold = 0.995 # a relative improvement of this much is\n # considered significant\n validation_frequency = min(n_train_batches, patience / 2)\n # go through this many\n # minibatches before checking the network\n # on the validation set; in this case we\n # check every epoch\n\n best_validation_loss = numpy.inf\n best_iter = 0\n test_score = 0.\n start_time = timeit.default_timer()\n\n epoch = 0\n done_looping = False\n\n\n #opening files to print validation error to\n if(not transfer):\n outFile = open('out.txt','w')\n else:\n outFile = open('outTransfer.txt','w')\n\n\n #Inserted code for printing out validation after randomization\n validation_losses = [validate_model(i) for i\n in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n outFile.write(str(this_validation_loss*100)) #printing the error out to the file, turned to string b/c still using write function\n outFile.write('\\n')\n\n\n while (epoch < n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in xrange(n_train_batches):\n\n minibatch_avg_cost = train_model(minibatch_index)\n # iteration number\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute zero-one loss on validation set\n validation_losses = [validate_model(i) for i\n in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n outFile.write(str(this_validation_loss*100)) #printing the error out to the file, turned to string b/c still using write function\n outFile.write('\\n')\n print(\n 'epoch %i, minibatch %i/%i, validation error %f %%' %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n\n # if we got the best validation score until now\n if this_validation_loss < best_validation_loss:\n #improve patience if loss improvement is good enough\n if (\n this_validation_loss < best_validation_loss *\n improvement_threshold\n ):\n patience = max(patience, iter * patience_increase)\n\n best_validation_loss = this_validation_loss\n best_iter = iter\n\n # test it on the test set\n test_losses = [test_model(i) for i\n in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print((' epoch %i, minibatch %i/%i, test error of '\n 'best model %f %%') %\n (epoch, minibatch_index + 1, n_train_batches,\n test_score * 100.))\n\n if patience <= iter:\n done_looping = True\n break\n #closing file\n outFile.close()\n end_time = timeit.default_timer()\n print(('Optimization complete. Best validation score of %f %% '\n 'obtained at iteration %i, with test performance %f %%') %\n (best_validation_loss * 100., best_iter + 1, test_score * 100.))\n print >> sys.stderr, ('The code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time) / 60.))\n\n\n\n #Goal of block: Calculate hidden node activations and find which weights to transfer\n # Create global theano shared variable for the weights to transfer\n if(not transfer):\n \n #Set threshold to determine bounds for activated nodes - Weights leading to activated nodes with absolute values >= threshold\n #will be copied over. 
Other weights are re-initialized.\n threshold = 0.0\n n_in = 28*28\n #inputs are passed from the train_set_x above\n hidden1W = classifier.hiddenLayer.W.get_value()\n hidden1Wcopy = hidden1W\n #Making a copy of the first hidden layer of weights to be used in calculations for second hidden lyaer of weights\n aveList = []\n #aveList represents the average hidden node activations for layer 1\n print 'starting transfer calculations'\n for i in range(0,n_hidden):\n x = 0\n for j in range(0,inputSize):\n #Design choice to use absolute value b/c a positive activation and a negative activation were both considered important\n x += abs(numpy.tanh(numpy.tensordot(inputs[j,:],hidden1W[:,i],axes=1)))\n aveList.append(x/inputSize)\n\n print 'ending calculation'\n\n count = 0\n for i in range(0,n_hidden):\n \n if(aveList[i] < threshold):\n #If the activation is below the threshold, then the weights corresponding leading to that hidden node will be reinitialized\n hidden1W[:,i] = numpy.asarray(\n rng.uniform(\n low=-numpy.sqrt(6. / (n_in + n_hidden)),\n high=numpy.sqrt(6. / (n_in + n_hidden)),\n size=(n_in,1)\n ),\n dtype=theano.config.floatX\n ).flatten()\n else:\n count+=1\n print 'A total number of ' + str(count) + ' H1 nodes passed the threshold'\n \n #saving count of hidden nodes\n outFile3 = open('transfer.txt','w')\n outFile3.write(str(count))\n outFile3.write('\\n')\n\n\n\n hidden1Act = numpy.zeros((1,n_hidden))\n #Making a dummy hidden layer variable to edit\n\n #now for the next hidden layer :)\n hidden2W = classifier.hiddenLayer2.W.get_value()\n aveList = []\n #aveList here represents the average hidden node activations for layer 2\n print 'starting next hidden layer calculation'\n for i in range(0,n_hidden):\n x = 0\n for j in range(0,inputSize):\n for k in range(0,n_hidden):\n hidden1Act[0][k] = numpy.tanh(numpy.tensordot(inputs[j,:],hidden1Wcopy[:,k],axes=1))\n x += abs(numpy.tanh(numpy.tensordot(hidden1Act[0,:],hidden2W[:,i],axes=1)))\n aveList.append(x/inputSize)\n print 'ending hidden 2 calculation'\n count = 0\n for i in range(0,n_hidden):\n if(aveList[i] < threshold):\n hidden2W[:,i] = numpy.asarray(\n rng.uniform(\n low=-numpy.sqrt(6. / (n_hidden + n_hidden)),\n high=numpy.sqrt(6. / (n_hidden + n_hidden)),\n size = (n_hidden,1)\n ),\n dtype=theano.config.floatX\n ).flatten()\n else:\n count += 1\n print 'A total number of ' + str(count) + ' H2 nodes passed the threshold'\n\n outFile3.write(str(count))\n outFile3.close()\n\n\n #3 global variables exist. 
tensor and tensor2 variables are the global theano shared variables for the weights.\n #During the next run, the MLP will be initialized with these weights thereby transferring the weights from this run.\n global transfer\n transfer = True\n global tensor\n global tensor2\n tensor = theano.shared(value=hidden1W,name = 'W', borrow=True)\n tensor2 = theano.shared(value = hidden2W, name = 'tensor2', borrow=True)\n\n test_mlp() \n else:\n print 'Thank you for running this transfer program'\n print 'Below are descriptions of files that have been created'\n print 'out.txt - validation error while training'\n print 'outTransfer.txt - validation error while training after transfer learning'\n print 'transfer.txt - number of hidden nodes transferred in each layer'", "def build_train_network(self):\n # Inputs\n vid_input = tf.placeholder(tf.float32, [None, self.num_frame, self.feat_size])\n caption_input = tf.placeholder(tf.int32, [None, self.sent_len])\n caption_mask = tf.placeholder(tf.float32, [None, self.sent_len])\n\n batch_size = tf.shape(vid_input)[0]\n # State variables\n v_LSTM_states = (tf.zeros((batch_size, self.v_LSTM_cell.state_size[0])),\n tf.zeros((batch_size, self.v_LSTM_cell.state_size[1])))\n t_LSTM_states = (tf.zeros((batch_size, self.t_LSTM_cell.state_size[0])),\n tf.zeros((batch_size, self.t_LSTM_cell.state_size[1])))\n padding = tf.zeros([batch_size, self.state_size])\n\n loss = 0.0\n # Encoder network\n # To ensure reuse is False when calling Adam \n with tf.variable_scope(tf.get_variable_scope()):\n for idx in range(self.num_frame):\n if idx > 0:\n tf.get_variable_scope().reuse_variables()\n\n with tf.variable_scope('v_LSTM'):\n v_output, v_LSTM_states = self.v_LSTM_cell(vid_input[:,idx,:], v_LSTM_states)\n with tf.variable_scope('t_LSTM'):\n _, t_LSTM_states = self.t_LSTM_cell(tf.concat([padding, v_output], 1), t_LSTM_states)\n \n null_video = tf.zeros([batch_size, self.feat_size])\n for idx in range(self.sent_len):\n tf.get_variable_scope().reuse_variables()\n # pdb.set_trace() \n # Decoder network\n with tf.variable_scope('v_LSTM'):\n v_output, v_LSTM_states = self.v_LSTM_cell(null_video, v_LSTM_states) \n # Lookup word embedding for each word in current time frame\n caption_embed = tf.nn.embedding_lookup(self.word_embed, caption_input[:,idx])\n with tf.variable_scope('t_LSTM'):\n t_output, t_LSTM_states = self.t_LSTM_cell(tf.concat([v_output, caption_embed], 1), t_LSTM_states)\n logit_output = tf.nn.xw_plus_b(t_output, self.t_output_W, self.t_output_b)\n # Label processing\n caption_onehot = tf.one_hot(caption_input[:,idx], self.dict_size)\n # Calculate loss\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit_output, labels=caption_onehot)\n cross_entropy = cross_entropy * caption_mask[:,idx]\n\n loss += tf.reduce_mean(cross_entropy)\n # Average loss\n # loss = loss / tf.reduce_sum(caption_mask)\n # pdb.set_trace()\n train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)\n # train_op = None\n\n tf.add_to_collection('x', vid_input)\n tf.add_to_collection('y', caption_input)\n tf.add_to_collection('y_mask', caption_mask)\n tf.add_to_collection('loss', loss)\n tf.add_to_collection('train_op', train_op)\n \n return dict(\n x = vid_input,\n y = caption_input,\n y_mask = caption_mask,\n loss = loss,\n train_op = train_op\n )", "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 
100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n # Get negative slope parameter for LeakyReLU\n neg_slope = FLAGS.neg_slope\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n import matplotlib.pyplot as plt\n\n data = cifar10_utils.get_cifar10(FLAGS.data_dir)\n train = data['train']\n test = data['test']\n dim_x = train.images.shape[1]*train.images.shape[2]*train.images.shape[3]\n\n mlp = MLP(dim_x, dnn_hidden_units, train.labels.shape[1], neg_slope)\n loss_module = CrossEntropyModule()\n\n loss_train = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n loss_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n accuracy_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n\n images_test = test.images\n labels_test = test.labels\n images_test = np.reshape(images_test, (images_test.shape[0], dim_x))\n\n for i in range(0, FLAGS.max_steps):\n if PRINTS:\n print('iter', i+1, end='\\r')\n images, labels = train.next_batch(FLAGS.batch_size) \n images = np.reshape(images, (images.shape[0], dim_x))\n\n pred = mlp.forward(images)\n loss = loss_module.forward(pred, labels)\n loss_grad = loss_module.backward(pred, labels)\n mlp.backward(loss_grad)\n\n for module in reversed(mlp.modules):\n if isinstance(module, LinearModule):\n module.params['weight'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['weight']\n module.params['bias'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['bias']\n if (i+1) % FLAGS.eval_freq == 0:\n pred_test = mlp.forward(images_test)\n loss_train[i // FLAGS.eval_freq] = loss\n accuracy_test[i // FLAGS.eval_freq] = accuracy(pred_test, labels_test)\n loss_test[i // FLAGS.eval_freq] = loss_module.forward(pred_test, labels_test)\n if PRINTS:\n print()\n print('test_loss:', loss_test[i // FLAGS.eval_freq])\n print('test_accuracy:', accuracy_test[i // FLAGS.eval_freq])\n print('train_loss:', loss_train[i // FLAGS.eval_freq])\n\n if PLOTS:\n fig, ax = plt.subplots(1, 2, figsize=(10,5))\n fig.suptitle('Training curves for Numpy MLP\\nFinal test accuracy: {:0.4f}, default configuration'.format(accuracy_test[i // FLAGS.eval_freq]))\n\n ax[0].set_title('Loss')\n ax[0].set_ylabel('Loss value')\n ax[0].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[0].plot(loss_train, label='Train')\n ax[0].plot(loss_test, label='Test')\n ax[0].legend()\n\n ax[1].set_title('Accuracy')\n ax[1].set_ylabel('Accuracy value')\n ax[1].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[1].plot(accuracy_test, label='Test')\n ax[1].legend()\n plt.show()\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.01, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.matrix()\n y_batch = T.matrix()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n\n code_layer = layers[num_layers/2]\n activations_2_layer = layers[num_layers/2 - 1]\n activations_1_layer = layers[num_layers/2 - 2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - 
sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n \n\n # code and activation outputs\n \n activations_1_output = lasagne.layers.get_output(activations_1_layer, X_batch, deterministic=True)\n activations_2_output = lasagne.layers.get_output(activations_2_layer, X_batch, deterministic=True)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n activations_1 = theano.function(inputs=[X_batch], outputs=activations_1_output, allow_input_downcast=True)\n activations_2 = theano.function(inputs=[X_batch], outputs=activations_2_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n code=code,\n activations_1=activations_1,\n activations_2=activations_2\n )", "def compute_nsp(self, prev_layers=None, max_seq_len=None, tickersteps=0, tickerstep_nodes=False, **kwargs):\n incoming = self.incoming(prev_layers=prev_layers, comp_next_seq_pos=True, max_seq_len=max_seq_len,\n tickersteps=tickersteps, tickerstep_nodes=tickerstep_nodes, **kwargs)\n \n external_rec = None\n if self.external_rec is not None:\n external_rec = self.external_rec(prev_layers=prev_layers, max_seq_len=max_seq_len, tickersteps=tickersteps,\n tickerstep_nodes=tickerstep_nodes, comp_next_seq_pos=True,\n **kwargs)[:, -1, :]\n \n act = OrderedDict(zip_longest(self.lstm_inlets, [None]))\n \n with tf.variable_scope(self.name) as scope:\n # Make sure tensorflow can reuse the variable names\n scope.reuse_variables()\n \n # Handle restriction on maximum sequence length\n if max_seq_len is not None:\n incoming = incoming[:, :max_seq_len, :]\n \n #\n # Compute LSTM cycle at each sequence position in 'incoming'\n #\n \n # Loop through sequence positions and get corresponding net_fwds\n for seq_pos, net_fwd in self.comp_net_fwd(incoming):\n \n # Calculate net for recurrent connections at current sequence position\n if self.external_rec is None:\n net_bwd = dot_product(self.h[-1], self.W_bwd_conc)\n else:\n net_bwd = dot_product(external_rec, self.W_bwd_conc)\n \n # Sum up net from forward and recurrent connections\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=1, num_or_size_splits=4,\n value=net_fwd + net_bwd)\n \n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=1, num_or_size_splits=4,\n value=net_fwd + net_bwd)\n \n # peepholes could be added here #\n \n # Calculate activations\n if tickerstep_nodes and (self.W_tickers is not None):\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g] + self.W_tickers[g])\n for g in self.lstm_inlets]))\n else:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g])\n for g in self.lstm_inlets]))\n \n # Calculate new cell state\n if self.store_states:\n self.c.append(act['ci'] * act['ig'] + self.c[-1] * act['fg'])\n\n self.ig.append(act['ig'])\n self.og.append(act['og'])\n self.ci.append(act['ci'])\n self.fg.append(act['fg'])\n else:\n self.c[-1] = act['ci'] * act['ig'] + self.c[-1] * act['fg']\n\n self.ci[-1] = act['ci']\n self.og[-1] = act['og']\n self.ig[-1] = act['ig']\n 
self.fg[-1] = act['fg']\n \n # Calculate new output with new cell state\n if self.store_states:\n self.h.append(self.a['out'](self.c[-1]) * act['og'])\n else:\n self.h[-1] = self.a['out'](self.c[-1]) * act['og']\n \n # Process tickersteps\n for _ in enumerate(range(tickersteps)):\n # The forward net input during the ticker steps is 0 (no information is added anymore)\n # ticker_net_fwd = 0\n \n # Calculate net for recurrent connections at current sequence position\n if self.external_rec is None:\n net_bwd = dot_product(self.h[-1], self.W_bwd_conc)\n else:\n net_bwd = dot_product(external_rec, self.W_bwd_conc)\n \n # Split net from recurrent connections\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=1, num_or_size_splits=4, value=net_bwd)\n \n # Calculate activations including ticker steps\n if self.W_tickers is not None:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g] + self.W_tickers[g])\n for g in self.lstm_inlets]))\n else:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g])\n for g in self.lstm_inlets]))\n \n # Calculate new cell state\n if self.store_states:\n self.c.append(act['ci'] * act['ig'] + self.c[-1] * act['fg'])\n\n self.ig.append(act['ig'])\n self.og.append(act['og'])\n self.ci.append(act['ci'])\n self.fg.append(act['fg'])\n else:\n self.c[-1] = act['ci'] * act['ig'] + self.c[-1] * act['fg']\n\n self.ci[-1] = act['ci']\n self.og[-1] = act['og']\n self.ig[-1] = act['ig']\n self.fg[-1] = act['fg']\n \n # Calculate new output with new cell state\n if self.store_states:\n self.h.append(self.a['out'](self.c[-1]) * act['og'])\n else:\n self.h[-1] = self.a['out'](self.c[-1]) * act['og']", "def compute_nsp(self, prev_layers=None, max_seq_len=None, tickersteps=0, tickerstep_nodes=False, **kwargs):\n incoming = self.incoming(prev_layers=prev_layers, comp_next_seq_pos=True, max_seq_len=max_seq_len,\n tickersteps=tickersteps, tickerstep_nodes=tickerstep_nodes, **kwargs)\n \n external_rec = None\n if self.external_rec is not None:\n external_rec = self.external_rec(prev_layers=prev_layers, max_seq_len=max_seq_len, tickersteps=tickersteps,\n tickerstep_nodes=tickerstep_nodes, comp_next_seq_pos=True,\n **kwargs)[:, -1, :]\n \n act = OrderedDict(zip_longest(self.lstm_inlets, [None]))\n \n with tf.variable_scope(self.name) as scope:\n # Make sure tensorflow can reuse the variable names\n scope.reuse_variables()\n \n # Handle restriction on maximum sequence length\n if max_seq_len is not None:\n incoming = incoming[:, :max_seq_len, :]\n \n #\n # Compute LSTM cycle at each sequence position in 'incoming'\n #\n \n # Loop through sequence positions and get corresponding net_fwds\n for seq_pos, net_fwd in self.comp_net_fwd(incoming):\n \n # Calculate net for recurrent connections at current sequence position\n if self.external_rec is None:\n net_bwd = dot_product(self.h[-1], self.W_bwd_conc)\n else:\n net_bwd = dot_product(external_rec, self.W_bwd_conc)\n \n # Sum up net from forward and recurrent connections\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=1, num_or_size_splits=4,\n value=net_fwd + net_bwd)\n \n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=1, num_or_size_splits=4,\n value=net_fwd + net_bwd)\n \n # peepholes could be added here #\n \n # Calculate activations\n if tickerstep_nodes and (self.W_tickers is not None):\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g] + self.W_tickers[g])\n for g in self.lstm_inlets]))\n else:\n act = OrderedDict(zip(self.lstm_inlets, 
[self.a[g](act[g] + self.b[g])\n for g in self.lstm_inlets]))\n \n # Calculate new cell state\n if self.store_states:\n self.c.append(act['ci'] * act['ig'] + self.c[-1] * act['fg'])\n \n self.ig.append(act['ig'])\n self.og.append(act['og'])\n self.ci.append(act['ci'])\n self.fg.append(act['fg'])\n else:\n self.c[-1] = act['ci'] * act['ig'] + self.c[-1] * act['fg']\n \n self.ci[-1] = act['ci']\n self.og[-1] = act['og']\n self.ig[-1] = act['ig']\n self.fg[-1] = act['fg']\n \n # Calculate new output with new cell state\n if self.store_states:\n self.h.append(self.a['out'](self.c[-1]) * act['og'])\n else:\n self.h[-1] = self.a['out'](self.c[-1]) * act['og']\n \n # Process tickersteps\n for _ in enumerate(range(tickersteps)):\n # The forward net input during the ticker steps is 0 (no information is added anymore)\n # ticker_net_fwd = 0\n \n # Calculate net for recurrent connections at current sequence position\n if self.external_rec is None:\n net_bwd = dot_product(self.h[-1], self.W_bwd_conc)\n else:\n net_bwd = dot_product(external_rec, self.W_bwd_conc)\n \n # Split net from recurrent connections\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=1, num_or_size_splits=4, value=net_bwd)\n \n # Calculate activations including ticker steps\n if self.W_tickers is not None:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g] + self.W_tickers[g])\n for g in self.lstm_inlets]))\n else:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g])\n for g in self.lstm_inlets]))\n \n # Calculate new cell state\n if self.store_states:\n self.c.append(act['ci'] * act['ig'] + self.c[-1] * act['fg'])\n \n self.ig.append(act['ig'])\n self.og.append(act['og'])\n self.ci.append(act['ci'])\n self.fg.append(act['fg'])\n else:\n self.c[-1] = act['ci'] * act['ig'] + self.c[-1] * act['fg']\n \n self.ci[-1] = act['ci']\n self.og[-1] = act['og']\n self.ig[-1] = act['ig']\n self.fg[-1] = act['fg']\n \n # Calculate new output with new cell state\n if self.store_states:\n self.h.append(self.a['out'](self.c[-1]) * act['og'])\n else:\n self.h[-1] = self.a['out'](self.c[-1]) * act['og']", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test_earlystop\".format(\n workspace=workspace_dir\n )", "def build_lstm_nnet(X, base_config, mid_layers_config, model_loss, optimizer):\n n_input_neurons = X.shape[1]\n\n model = Sequential()\n model.add(\n LSTM(\n base_config[\"first_layer\"][\"mult\"] * n_input_neurons,\n 
input_shape=(n_input_neurons, X.shape[2]),\n return_sequences=True,\n )\n )\n model.add(Dropout(rate=base_config[\"first_layer\"][\"dropout_rate\"]))\n\n for i in range(mid_layers_config[\"n_layers\"]):\n model.add(\n LSTM(mid_layers_config[\"mult\"] * n_input_neurons, return_sequences=True)\n )\n model.add(Dropout(rate=mid_layers_config[\"dropout_rate\"]))\n\n model.add(LSTM(base_config[\"last_layer\"][\"mult\"] * n_input_neurons))\n model.add(Dropout(rate=base_config[\"last_layer\"][\"dropout_rate\"]))\n # TO DO : parametrize this\n model.add(Dense(1))\n\n if model_loss == \"max_error\":\n model_loss = max_error_loss\n\n model.compile(loss=model_loss, optimizer=optimizer)\n\n return model", "def demo_train(ts_struct_list, frc_model=None, fg_mdl=None, fs_mdl=None, verbose=False,\n return_model=False, rewrite=True):\n\n # Check arguments:\n if fg_mdl is None:\n fg_mdl = frc_class.IdentityGenerator(name=\"Identity generator\", on=False)\n\n if fs_mdl is None:\n fs_mdl = gnt_class.FeatureGeneration() # IdentityModel(name=\"Identity selector\")\n\n if frc_model is None:\n frc_model = frc_class.CustomModel(Lasso, name=\"Lasso\", alpha=0.01)\n\n model = frc_class.PipelineModel(gen_mdl=fg_mdl, sel_mdl=fs_mdl, frc_mdl=frc_model)\n results = []\n res_text = []\n\n for ts in ts_struct_list:\n data = regression_matrix.RegMatrix(ts, x_idx=TS_IDX, y_idx=TS_IDX)\n\n # Create regression matrix\n data.create_matrix(nsteps=N_STEPS, norm_flag=True) # this creates data.Y, data.X and some other fields\n\n # Split data for training and testing\n data.train_test_split(TRAIN_TEST_RATIO)\n\n # train the model. This returns trained pipeline and its steps\n model, frc, gen, sel = model.train_model(data.trainX, data.trainY)\n\n selection_res = \"\\n Feature selection results: problem status {}, selected {} from {} \\\\\\\\ \\n\".\\\n format(sel.status, len(sel.selected), sel.n_vars)\n\n frcY, _ = data.forecast(model) # returns forecasted matrix of the same shape as data.Y\n # frcY, idx_frc = data.forecast(model, idx_rows=data.idx_test) # this would return forecasts only for data.testY\n\n data.plot_frc(n_frc=5, n_hist=10, folder=SAVE_DIR) #this saves figures into SAVE_DIR\n\n train_mae = data.mae(idx_rows=data.idx_train, idx_original=data.original_index)\n train_mape = data.mape(idx_rows=data.idx_train, idx_original=data.original_index)\n\n test_mae = data.mae(idx_rows=data.idx_test, idx_original=data.original_index)\n test_mape = data.mape(idx_rows=data.idx_test, idx_original=data.original_index)\n\n index = [ts.data[i].name for i in TS_IDX]\n res1 = pd.DataFrame(train_mae, index=index, columns=[(\"MAE\", \"train\")])\n res2 = pd.DataFrame(train_mape, index=index, columns=[(\"MAPE\", \"train\")])\n res3 = pd.DataFrame(test_mae, index=index, columns=[(\"MAE\", \"test\")])\n res4 = pd.DataFrame(test_mape, index=index, columns=[(\"MAPE\", \"test\")])\n res = pd.concat([res1, res2, res3, res4], axis=1)\n\n configuration_str = \"\\n Time series {} forecasted with {} + '{}' feature generation model and \" \\\n \"'{}' feature selection model \\\\\\\\ \\n\".format(ts.name, frc.name, gen.name, sel.name)\n if verbose:\n print(configuration_str)\n print(selection_res)\n print(res)\n\n results.append(res)\n res_text.append(configuration_str)\n res_text.append(selection_res)\n\n saved_mdl_fname = model.save_model(file_name=FNAME_PREFIX, folder=SAVE_DIR) # saving in not an option yet\n # model = frc_class.PipelineModel().load_model(file_name=fname)\n\n # write results into a latex file\n my_plots.save_to_latex(results, 
df_names=res_text, folder=SAVE_DIR, rewrite=rewrite)\n print(\"Results saved to folder {}\".format(SAVE_DIR))\n\n if return_model:\n return model, saved_mdl_fname\n\n return saved_mdl_fname", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def train_lm():\n\n print(\"\\n\\nTraining started at {} - {}\\n\\n\".format(\n time.strftime(\"%d/%m/%Y\"), time.strftime(\"%H:%M:%S\")\n ))\n\n config = get_config(FLAGS.config)\n\n vocabulary = read_vocabulary(config.data_files, config.vocab_size)\n train_data, valid_data, test_data = read_lm_data(config.data_files,\n vocabulary)\n\n with tf.Graph().as_default() as graph:\n\n # define a default initializer for the model\n initializer = tf.random_uniform_initializer(\n -config.init_scale, config.init_scale, seed=seed, dtype=tf.float32)\n\n # model for training\n print(\"\\nBuilding Model for training...\")\n with tf.name_scope(\"Train\"):\n with tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n\n train_data_producer = lm_data_producer(train_data,\n config.batch_size,\n config.num_steps)\n\n train_queue = tf.FIFOQueue(\n capacity=len(train_data_producer[0]), dtypes=[tf.int32, tf.int32],\n shapes=[[config.num_steps]] * 2)\n\n train_inputs = tf.convert_to_tensor(train_data_producer[0],\n dtype=tf.int32)\n train_targets = tf.convert_to_tensor(train_data_producer[1],\n dtype=tf.int32)\n enqueue_op_train = train_queue.enqueue_many([train_inputs,\n 
train_targets])\n\n qr_train = tf.train.QueueRunner(train_queue, [enqueue_op_train] * 2)\n tf.train.add_queue_runner(qr_train)\n\n mtrain = AttentiveLM(is_training=True,\n params=config,\n batch_size=config.batch_size,\n num_steps=config.num_steps,\n queue=train_queue,\n keep_attention_weights=False,\n log_tensorboard=FLAGS.log_tensorboard)\n print(\"Batch size: {:d}\".format(mtrain.batch_size))\n print(\"# of steps: {:d}\".format(mtrain.num_steps))\n\n # model for validation\n print(\"\\nBuilding Model for validation...\")\n with tf.name_scope(\"Valid\"):\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n\n num_valid_steps = max([len(sample) for sample in valid_data])\n valid_data_producer = lm_data_producer(\n valid_data, config.batch_size, num_valid_steps)\n\n valid_queue = tf.FIFOQueue(\n capacity=len(valid_data_producer[0]), dtypes=[tf.int32, tf.int32],\n shapes=[[num_valid_steps]] * 2)\n\n valid_inputs = tf.convert_to_tensor(\n valid_data_producer[0], dtype=tf.int32)\n valid_targets = tf.convert_to_tensor(\n valid_data_producer[1], dtype=tf.int32)\n enqueue_op_valid = valid_queue.enqueue_many(\n [valid_inputs, valid_targets])\n\n qr_valid = tf.train.QueueRunner(valid_queue, [enqueue_op_valid] * 2)\n tf.train.add_queue_runner(qr_valid)\n\n mvalid = AttentiveLM(\n is_training=False, params=config, batch_size=config.batch_size,\n num_steps=num_valid_steps, queue=valid_queue,\n keep_attention_weights=False)\n print(\"# of validation steps: {:d}\".format(num_valid_steps))\n\n # configure the session\n proto_config = tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=False)\n\n # save training and best models\n saver = tf.train.Saver(max_to_keep=3)\n saver_best = tf.train.Saver(max_to_keep=1)\n\n supervisor = tf.train.Supervisor(logdir=FLAGS.train_dir,\n saver=saver, save_model_secs=0)\n\n with supervisor.managed_session(config=proto_config) as session:\n\n # supervisor.\n\n best_valid_ppx = np.inf\n estop_counter = 0\n\n for epoch in range(FLAGS.max_epochs):\n\n lr_decay = config.lr_decay ** max(epoch - config.start_decay, 0.0)\n mtrain.assign_lr(session, config.learning_rate * lr_decay)\n\n # print info\n print(\"\\nEpoch: {:d} - Learning rate: {:e}\".format(\n epoch, session.run(mtrain.lr_rate)))\n\n _ = run_epoch(session, mtrain, train_data, is_train=True)\n\n # Save checkpoint\n print(\"\\nSaving current model...\")\n checkpoint_path = os.path.join(FLAGS.train_dir, FLAGS.model_name)\n saver.save(session, checkpoint_path, global_step=mtrain.global_step)\n\n print(\"\\nRunning validation...\")\n valid_ppx = run_epoch(session, mvalid, valid_data, is_train=False)\n print(\"Epoch {:d}: - Valid Perplexity: {:.8f}\".format(epoch, valid_ppx))\n\n # check early stop\n if FLAGS.early_stop_patience > 0:\n\n if best_valid_ppx > valid_ppx:\n best_valid_ppx = valid_ppx\n estop_counter = 0\n print('\\nSaving the best model so far...')\n model_name = FLAGS.model_name + '-best'\n best_model_path = os.path.join(FLAGS.best_models_dir, model_name)\n saver_best.save(session, best_model_path,\n global_step=mtrain.global_step)\n else:\n estop_counter += 1\n\n print(\"\\n\\tbest valid. 
ppx: {:.8f}\".format(best_valid_ppx))\n print(\"early stop patience: {:d} - max {:d}\\n\".format(\n estop_counter, FLAGS.early_stop_patience))\n\n if estop_counter >= FLAGS.early_stop_patience:\n print('\\nEARLY STOP!\\n')\n supervisor.request_stop()\n supervisor.coord.join(threads)\n break\n\n # when we ran the right number of epochs or we reached early stop we\n # finish training\n print(\"\\n\\nTraining finished at {} - {}\\n\\n\".format(\n time.strftime(\"%d/%m/%Y\"), time.strftime(\"%H:%M:%S\")\n ))\n\n with tf.Graph().as_default() as test_graph:\n\n # model for testing\n print(\"\\n\\nBuilding Model for testing...\\n\")\n with tf.name_scope(\"Test\"):\n with tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n num_test_steps = max([len(sample) for sample in test_data])\n\n test_data_producer = lm_data_producer(\n test_data, config.batch_size, num_test_steps)\n\n test_queue = tf.FIFOQueue(\n capacity=len(test_data_producer[0]), dtypes=[tf.int32, tf.int32],\n shapes=[[num_test_steps]] * 2)\n\n test_inputs = tf.convert_to_tensor(\n test_data_producer[0], dtype=tf.int32)\n test_targets = tf.convert_to_tensor(\n test_data_producer[1], dtype=tf.int32)\n enqueue_op_test = test_queue.enqueue_many(\n [test_inputs, test_targets])\n\n qr_test = tf.train.QueueRunner(test_queue, [enqueue_op_test] * 2)\n tf.train.add_queue_runner(qr_test)\n mtest = AttentiveLM(is_training=False,\n params=config,\n batch_size=config.batch_size,\n num_steps=num_test_steps,\n keep_attention_weights=True)\n print(\"# of test steps: {:d}\".format(num_test_steps))\n\n saver_test = tf.train.Saver(max_to_keep=1)\n test_supervisor = tf.train.Supervisor(\n logdir=FLAGS.best_models_dir, summary_writer=None,\n saver=saver_test, save_model_secs=0)\n\n with test_supervisor.managed_session(config=proto_config) as test_session:\n # eval on test\n print(\"\\nRunning test...\")\n test_ppx = run_epoch(\n test_session, mtest, test_data,\n is_train=False, plot_attention_weights=True)\n print(\"Test Perplexity: {:.8f}\".format(test_ppx))\n\n test_supervisor.request_stop()\n test_supervisor.coord.join()\n\n sys.stdout.flush()", "def get_model(eventclouds, \n seq_len,\n num_classes,\n is_training,\n bn_decay=None):\n \n num_point = eventclouds.get_shape()[-2].value \n batch_size = eventclouds.get_shape()[0].value \n eventclouds = tf.reshape(eventclouds, [-1, num_point, 3])\n \n end_points = {}\n l0_xyz = eventclouds\n l0_points = None\n end_points['l0_xyz'] = l0_xyz\n\n # Set abstraction layers\n \n l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=256, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')\n l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=64, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')\n l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')\n \n # Fully connected layers\n net = tf.reshape(l3_points, [batch_size, -1])\n net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')\n net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)\n net = 
tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')\n net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='fc3')\n\n return net", "def test_model_with_simple_rnn_layer(self):\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Add an RNN layer with 12 internal units.\n x = tf.keras.layers.SimpleRNN(12, name='rnn0')(inputs)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./simple_rnn', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n # there should be only 4 connected graph ops, input, simpleRNN , Dense and Softmax\n self.assertEqual(4, len(conn_graph.get_all_ops()))\n simple_rnn_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'SimpleRNN':\n simple_rnn_detected = True\n inner_list = op.internal_ops\n self.assertEqual(49, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('rnn0/while/MatMul'))\n self.assertEqual('rnn0', op.name)\n self.assertTrue(simple_rnn_detected)\n\n # check for 2 MatMuls, 1 BiasAdd and an activation function in the inner op list\n valid_matmuls = []\n valid_bias_add = []\n valid_activation = []\n for op in inner_list:\n if op.type == 'MatMul' and op not in valid_matmuls:\n valid_matmuls.append(op)\n if op.type == 'BiasAdd' and op not in valid_bias_add:\n valid_bias_add.append(op)\n if op.type == 'Tanh' and op not in valid_activation:\n valid_activation.append(op)\n\n self.assertEqual(2, len(valid_matmuls))\n self.assertEqual(1, len(valid_bias_add))\n self.assertEqual(1, len(valid_activation))", "def lstm_atten(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.5))\n model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n # model.add(Dropout(0.5))\n\n attention = Dense(1, activation='tanh')(activations)\n attention = Flatten()(attention)\n attention = Activation('softmax')(attention)\n attention = RepeatVector(2048)(attention)\n attention = Permute([2, 1])(attention)\n\n sent_representation = concatenate([activations, attention], mode='mul')\n sent_representation = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(2048,))(sent_representation)\n\n probabilities = Dense(self.nb_classes, activation='softmax')(sent_representation)\n\n model = model(input=self.input_shape, output=probabilities )\n\n dense1800 = Dense(4096, activation='relu')\n\n #dense1800 = Dense(1800, activation='relu', kernel_regularizer=regularizers.l2(0.01))(inputs)\n attention_probs = Dense(4096, activation='sigmoid', name='attention_probs')(dense1800)\n attention_mul = multiply([dense1800, attention_probs], name='attention_mul')\n dense7 = Dense(self.nb_classes, kernel_regularizer=regularizers.l2(0.01), activation='softmax')(attention_mul)\n model = Model(input=[self.input_shape], output=dense7)\n return model", "def train(data_train, data_test):\n data = data_train\n # # xxx = [item for xx in data for item in xx]\n # xxx = []\n # for xx in data:\n # xxx.extend(xx.flatten())\n\n checkpoint_and_write_save_dir = logdir()\n\n os.system(\"mkdir -p checkpoints\")\n os.system(\"mkdir -p checkpoints/{}\".format(checkpoint_and_write_save_dir))\n\n writer = 
SummaryWriter(os.path.join(\"runs\", checkpoint_and_write_save_dir), comment=\"FreqWarp\")\n\n logging.info(\"Building architecture...\")\n\n if use_cuda:\n net = Net(20, 20, 20, nb_lstm_layers, batch_size).cuda()\n else:\n net = Net(20, 20, 20, nb_lstm_layers, batch_size)\n net.train()\n\n # optimizer = optim.SGD(net.parameters(), lr=0.001)\n # optimizer = optim.Adam(net.parameters(), lr=0.005, weight_decay=0.0001)\n optimizer = optim.RMSprop(net.parameters(), lr=0.005, weight_decay=0.0001)\n\n # criterion = nn.MSELoss()\n criterion = nn.L1Loss(size_average=False)\n\n logging.info(\"Reading data ...\")\n\n best_avg_loss = 1000000\n best_avg_loss_at_epoch = 0\n\n logging.info(\"START TRAINING ... MAX EPOCH: \" + str(nb_epoch))\n for epoch in range(nb_epoch):\n print(\"====================================================================\")\n count = 0\n loss_sum = 0\n\n for i in range(len(data)):\n if use_cuda:\n temp_x = torch.tensor(data[i][0]).cuda()\n temp_y = torch.tensor(data[i][1]).cuda()\n else:\n temp_x = torch.tensor(data[i][0])\n temp_y = torch.tensor(data[i][1])\n \n # exit()\n # for ii in range(0, data[i][0].shape[0] - nb_frame_in_batch*2 + 1):\n optimizer.zero_grad()\n\n h_state = net.hidden_init(temp_x) # New added Dec 07: They say hidden state need to be clear before each step\n\n # prediction, h_state = net(batch_x.float(), h_state)\n prediction, h_state = net(temp_x.float(), h_state)\n # prediction = net(batch_x.unsqueeze(0).float(), None)\n\n loss = criterion(prediction.float(), temp_y.float().view(len(temp_y), batch_size, -1))\n\n # h_state = (h_state[0].detach(), h_state[1].detach())\n\n loss.backward()\n optimizer.step()\n\n loss_sum += loss\n count += 1\n\n else:\n with torch.no_grad():\n losses = []\n for i in range(len(data_test)):\n if use_cuda:\n temp_x = torch.tensor(data_test[i][0]).cuda()\n temp_y = torch.tensor(data_test[i][1]).cuda()\n else:\n temp_x = torch.tensor(data_test[i][0])\n temp_y = torch.tensor(data_test[i][1])\n\n h_state = net.hidden_init(temp_x)\n prediction, h_state = net(temp_x.float(), h_state)\n loss = criterion(prediction.float(), temp_y.float().view(len(temp_y), batch_size, -1))\n\n losses.append(loss.data.item())\n logging.info(describe(losses))\n\n writer.add_scalar(\"loss/minibatch\", loss_sum / count, global_step=epoch)\n # writer.add_graph(net, (temp_x.float(), h_state), verbose=True)\n\n # for m_index, m in enumerate(net.parameters()):\n # print(m_index)\n # print(net_modules[m_index])\n # writer.add_histogram('histogram/', m.data, global_step=epoch)\n for name, param in net.named_parameters():\n writer.add_histogram('histogram/' + name, param.data, global_step=epoch)\n\n avg_loss = loss_sum / count\n if avg_loss < best_avg_loss:\n state = {\n 'epoch': epoch,\n 'state_dict': net,\n 'optimizer': optimizer\n }\n\n save_checkpoint(checkpoint_and_write_save_dir + \"/\" + MODEL_PTH_NAME + \"_epoch\" + str(epoch) + \"_\" + str(round(float(avg_loss), 3)), model=net, state=state)\n\n logging.info(\"Epoch {}: average loss = {:.3f}, improve {:.3f} from {:.3f}. Model saved at checkpoints/{}/{}.pth\"\n .format(epoch, avg_loss, best_avg_loss - avg_loss, best_avg_loss, checkpoint_and_write_save_dir, MODEL_PTH_NAME + \"_epoch\" + str(epoch) + \"_\" + str(round(float(avg_loss), 3))))\n\n best_avg_loss = avg_loss\n best_avg_loss_at_epoch = epoch\n\n elif epoch - best_avg_loss_at_epoch > patience:\n logging.info(\"Model hasn't improved since epoch {}. 
Stop training ...\".format(best_avg_loss_at_epoch))\n break\n else:\n logging.info(\"Epoch {}: average loss = {:.3f}. No improvement since epoch {}\".format(epoch, avg_loss, best_avg_loss_at_epoch))\n\n writer.close()\n\n return net", "def test_mlp():\r\n datasets = gen_data()\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x , test_set_y = datasets[2]\r\n\r\n\r\n\r\n batch_size = 100 # size of the minibatch\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n #print '... building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n rng = numpy.random.RandomState(1234)\r\n\r\n # construct the MLP class\r\n classifier = MLP( rng = rng, input=x, n_in=28*28, n_hidden = 500, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model.\r\n # We take the mean of the cost over each minibatch.\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compute the gradient of cost with respect to theta (stored in params)\r\n # the resulting gradients will be stored in a list gparams\r\n gparams = []\r\n for param in classifier.params:\r\n gparam = T.grad(cost, param)\r\n gparams.append(gparam)\r\n\r\n # Some optimizations needed are tagged with 'fast_run'\r\n # TODO: refine that and include only those\r\n mode = theano.compile.get_default_mode().including('fast_run')\r\n\r\n updates2 = OrderedDict()\r\n\r\n updates2[classifier.hiddenLayer.params[0]]=T.grad(cost,classifier.hiddenLayer.params[0])\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]},\r\n mode=mode)\r\n #print 'MODEL 1'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])\r\n\r\n # Even without FeatureShape\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n mode=mode.excluding('ShapeOpt'),\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]})\r\n #print\r\n #print 'MODEL 2'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])", "def evaluate_lenet5(learning_rate=0.005, n_epochs=8000,\n dataset='MembraneSamples_95x95x1_mp0.50_train5000_valid1000_test1000.pkl.gz',\n nkerns=[32, 32, 32, 32], batch_size=500):\n\n rng = numpy.random.RandomState(23455)\n\n #nsplits = 10\n #current_split = 0\n #datasets = load_data(dataset.format(current_split, nsplits))\n\n datasets = load_data(dataset)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n # compute number of minibatches for training, validation and 
testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0]\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]\n n_test_batches = test_set_x.get_value(borrow=True).shape[0]\n n_train_batches /= batch_size\n n_valid_batches /= batch_size\n n_test_batches /= batch_size\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n x = T.matrix('x') # the data is presented as rasterized images\n y = T.ivector('y') # the labels are presented as 1D vector of\n # [int] labels\n\n ishape = (95, 95) # this is the size of white and black patches\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '... building the model'\n\n # Reshape matrix of rasterized images of shape (batch_size, 95*95)\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\n layer0_input = x.reshape((batch_size, 1, 95, 95))\n\n # Construct the first convolutional pooling layer:\n # filtering reduces the image size to (95-4+1, 95-4+1)=(92, 92)\n # maxpooling reduces this further to (92/2, 92/2) = (46, 46)\n # 4D output tensor is thus of shape (batch_size,nkerns[0], 46, 46)\n layer0 = LeNetConvPoolLayer(rng, input=layer0_input,\n image_shape=(batch_size, 1, 95, 95),\n filter_shape=(nkerns[0], 1, 4, 4), poolsize=(2, 2))\n \n # Construct the second convolutional pooling layer\n # filtering reduces the image size to (46-5+1, 46-5+1)=(42,42)\n # maxpooling reduces this further to (42/2, 42/2) = (21,21)\n # 4D output tensor is thus of shape (nkerns[0],nkerns[1], 21, 21)\n layer1 = LeNetConvPoolLayer(rng, input=layer0.output,\n image_shape = (batch_size, nkerns[0], 46, 46),\n filter_shape = (nkerns[1], nkerns[0], 5, 5), poolsize=(2, 2))\n\n # Construct the third convolutional pooling layer\n # filtering reduces the image size to (21-4+1, 21-4+1) = (18, 18)\n # maxpooling reduces this further to (18/2, 18/2) = (9, 9)\n # 4D output tensor is thus of shape (nkerns[1], nkerns[2], 9, 9)\n layer2 = LeNetConvPoolLayer(rng, input=layer1.output,\n image_shape=(batch_size, nkerns[1], 21, 21),\n filter_shape=(nkerns[2], nkerns[1], 4, 4), poolsize=(2, 2))\n\n # Construct the fourth convolutional pooling layer\n # filtering reduces the image size to (9-4+1, 9-4+1) = (6, 6)\n # maxpooling reduces this further to (6/2, 6/2) = (3, 3)\n # 4D output tensor is thus of shape (nkerns[2], nkerns[3], 3,3)\n layer3 = LeNetConvPoolLayer(rng, input=layer2.output,\n image_shape=(batch_size, nkerns[2], 9, 9),\n filter_shape=(nkerns[3], nkerns[2], 4, 4), poolsize=(2, 2))\n\n # the TanhLayer being fully-connected, it operates on 2D matrices of\n # shape (batch_size, num_pixels) (i.e matrix of rasterized images).\n layer4_input = layer3.output.flatten(2)\n\n # construct a fully-connected sigmoidal layer\n layer4 = HiddenLayer(rng, input=layer4_input, n_in=nkerns[3] * 3 * 3,\n n_out=100, activation=T.tanh)\n\n # classify the values of the fully-connected sigmoidal layer\n layer5 = LogisticRegression(input=layer4.output, n_in=100, n_out=2)\n\n # the cost we minimize during training is the NLL of the model\n cost = layer5.negative_log_likelihood(y)\n\n # create a function to compute the mistakes that are made by the model\n test_model = theano.function([index], layer5.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]})\n\n validate_model = theano.function([index], layer5.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * 
batch_size: (index + 1) * batch_size]})\n \n\n ##### Attempt to load a progress file #####\n\n epoch = 0\n best_validation_loss = numpy.inf\n \n outfile = dataset.replace('.pkl.gz', '.progress.pkl.gz')\n if os.path.isfile(outfile):\n f = gzip.open(outfile, 'rb')\n iter, best_params, this_validation_loss, test_score = cPickle.load(f)\n f.close()\n epoch = numpy.floor(iter / n_train_batches)\n best_validation_loss = this_validation_loss\n layer5.W.set_value(best_params[0][0].get_value())\n layer5.b.set_value(best_params[0][1].get_value())\n layer4.W.set_value(best_params[1][0].get_value())\n layer4.b.set_value(best_params[1][1].get_value())\n layer3.W.set_value(best_params[2][0].get_value())\n layer3.b.set_value(best_params[2][1].get_value())\n layer2.W.set_value(best_params[3][0].get_value())\n layer2.b.set_value(best_params[3][1].get_value())\n layer1.W.set_value(best_params[4][0].get_value())\n layer1.b.set_value(best_params[4][1].get_value())\n layer0.W.set_value(best_params[5][0].get_value())\n layer0.b.set_value(best_params[5][1].get_value())\n print 'Loaded progress file. Up to epoch {0}, validation error {1}, test error {2}.'.format(epoch, this_validation_loss * 100, test_score * 100)\n\n # create a list of all model parameters to be fit by gradient descent\n params = layer5.params + layer4.params + layer3.params + layer2.params + layer1.params + layer0.params\n\n # create a list of gradients for all model parameters\n grads = T.grad(cost, params)\n\n # train_model is a function that updates the model parameters by\n # SGD Since this model has many parameters, it would be tedious to\n # manually create an update rule for each model parameter. We thus\n # create the updates dictionary by automatically looping over all\n # (params[i],grads[i]) pairs.\n\n #updates = {}\n #for param_i, grad_i in zip(params, grads):\n # updates[param_i] = param_i - learning_rate * grad_i\n \n updates = []\n for param_i, grad_i in zip(params, grads):\n updates.append((param_i, param_i - learning_rate * grad_i))\n\n train_model = theano.function([index], cost, updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]})\n\n ###############\n # TRAIN MODEL #\n ###############\n print '... 
training'\n # early-stopping parameters\n patience = 10000 # look at this many examples regardless\n patience_increase = 2 # wait this much longer when a new best is\n # found\n improvement_threshold = 0.995 # a relative improvement of this much is\n # considered significant\n validation_frequency = min(n_train_batches, patience / 2)\n # go through this many\n # minibatche before checking the network\n # on the validation set; in this case we\n # check every epoch\n\n best_params = None\n best_iter = 0\n test_score = 0.\n start_time = time.clock()\n\n done_looping = False\n\n while (epoch < n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in xrange(n_train_batches):\n\n iter = epoch * n_train_batches + minibatch_index\n\n if iter % 100 == 0:\n print 'training @ iter = ', iter\n cost_ij = train_model(minibatch_index)\n\n if (iter + 1) % validation_frequency == 0: \n \n # compute zero-one loss on validation set\n validation_losses = [validate_model(i) for i\n in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n print('epoch %i, minibatch %i/%i, validation error %f %%' % \\\n (epoch, minibatch_index + 1, n_train_batches, \\\n this_validation_loss * 100.))\n\n # if we got the best validation score until now\n if this_validation_loss < best_validation_loss:\n\n #improve patience if loss improvement is good enough\n if this_validation_loss < best_validation_loss * \\\n improvement_threshold:\n patience = max(patience, iter * patience_increase)\n\n # save best validation score and iteration number\n best_validation_loss = this_validation_loss\n best_iter = iter\n best_params = (layer5.params, layer4.params, layer3.params, layer2.params, layer1.params, layer0.params)\n\n # test it on the test set\n test_losses = [test_model(i) for i in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n print((' epoch %i, minibatch %i/%i, test error of best '\n 'model %f %%') %\n (epoch, minibatch_index + 1, n_train_batches,\n test_score * 100.))\n\n f = gzip.open(outfile,'wb', compresslevel=1)\n cPickle.dump((iter, best_params, this_validation_loss, test_score),f)\n f.close()\n print 'Progress saved.'\n\n if patience <= iter:\n done_looping = True\n break\n\n #Load a new dataset split after each epoch\n\n # current_split = (current_split + 1) % nsplits\n\n # datasets = load_data(dataset.format(current_split, nsplits))\n\n # train_set_x, train_set_y = datasets[0]\n # valid_set_x, valid_set_y = datasets[1]\n # test_set_x, test_set_y = datasets[2]\n\n end_time = time.clock()\n print('Optimization complete.')\n print('Best validation score of %f %% obtained at iteration %i,'\\\n 'with test performance %f %%' %\n (best_validation_loss * 100., best_iter + 1, test_score * 100.))\n\n\n print >> sys.stderr, ('The code ran for %.2fm' % ((end_time - start_time) / 60.))\n\n return best_params", "def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, 
weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteratin print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, num_units=512, num_layers=4):\n\n self.num_units = num_units\n self.num_layers = num_layers\n \n self.layers = []\n for idx in range(self.num_layers):\n lstm_layer = tf.contrib.cudnn_rnn.CudnnLSTM(num_units=self.num_units,\n direction='unidirectional',\n num_layers=1)\n self.layers.append(lstm_layer)", "def experiment_linear_conv_ls(_):\n # Min dft1-norm solution found (norm=1.9895)\n adv_norm_type = 'dftinf'\n dual_norm_type = 'dft1'\n attack_step_dir = 'dftinf_sd' # 'dftinf'\n\n module_name = 'train'\n # log_dir = 'runs_linear_conv_ls_%s' % adv_norm_type\n log_dir = 'runs_linear_conv_ls_normfix_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [1, 2, 4, 8, 16, 32] # separable >= 1\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 1), # 500\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n # ('lr', 0.1),\n ('niters', 1), # 10\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n\n # Model hyper-parameters\n conv_linear_params = nameit('model', [\n ('arch', 'conv_linear'),\n ('nlayers', 2),\n ('regularizer', 'none'),\n ])\n\n params = []\n\n # GD line search implicit bias\n gd_ls = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 100000),\n ('bound_step', True),\n ])\n params += [OrderedDict(shared_params+conv_linear_params+gd_ls)]\n\n return params, log_dir, module_name, exclude", "def build_model(options,worddicts):\n opt_ret=dict()\n params=dict()\n word_xr1_mask=tf.reverse(word_x1_mask,[1])\n word_xr2_mask = tf.reverse(word_x2_mask, [1])\n\n\n\n #embedding layer\n word_embedding = norm_weight(options['n_words'], options['dim_word'])\n if options['embedding']:\n with open(options['embedding'], 'r',encoding='iso-8859-1') as f:\n for line in 
f:\n temp=line.split()\n word=temp[0]\n vector=temp[1:]\n if word in worddicts and worddicts[word]<options['n_words']:\n word_embedding[worddicts[word],:]=vector\n\n word_embedding_layer=tf.Variable(word_embedding,name='word_embedding')\n\n emb1=tf.nn.embedding_lookup(word_embedding_layer,word_x1,name='embedding_word_lookup1')\n emb2=tf.nn.embedding_lookup(word_embedding_layer,word_x2,name='embedding_word_lookup2')\n\n if options['use_dropout']:\n emb1=tf.cond(use_noise,lambda :tf.nn.dropout(emb1,0.5),lambda :emb1)\n emb2 = tf.cond(use_noise, lambda: tf.nn.dropout(emb2, 0.5), lambda: emb2)\n\n #1-layer LSTM\n print('LSTM result')\n for l in range(1):\n #param_init_lstm\n prefix = 'encoder_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim_word']\n else:\n nin = options['dim_word']+2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n #param_init_rlstm\n prefix = 'encoder_r_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim_word']\n else:\n nin = options['dim_word'] +2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n\n\n if l==0:\n ctx1=emb1\n ctx2=emb2\n else:\n ctx1=tf.concat([ctx1,emb1],axis=2)\n ctx2=tf.concat([ctx2,emb2],axis=2)\n\n print(ctx1)\n\n ctxr1=tf.reverse(ctx1,axis=[1])\n ctxr2=tf.reverse(ctx2,axis=[1])\n\n proj1=RNN_layer(ctx1,word_x1_mask,options,params,prefix='encoder_{}'.format(str(l+1)))\n projr1=RNN_layer(ctxr1,word_xr1_mask,options,params,prefix='encoder_r_{}'.format(str(l+1)))\n proj2=RNN_layer(ctx2,word_x2_mask,options,params,prefix='encoder_{}'.format(str(l+1)))\n projr2=RNN_layer(ctxr2,word_xr2_mask,options,params,prefix='encoder_r_{}'.format(str(l+1)))\n\n ctx1=tf.concat([proj1[0],projr1[0][::-1]],axis=len(projr1[0].shape)-1)\n ctx2 = tf.concat([proj2[0], projr2[0][::-1]], axis=len(projr2[0].shape) - 1)\n ctx1 = tf.transpose(ctx1, [1, 0, 2])\n ctx2 = tf.transpose(ctx2, [1, 0, 2])\n print(ctx1)\n\n ctx1=ctx1*word_x1_mask[:,:,None]\n ctx2 = ctx2 * word_x2_mask[:, :, None]\n def _step(h,x):\n return tf.matmul(x[0],x[1])\n temp=tf.zeros((tf.shape(ctx1)[1],tf.shape(ctx2)[1]))\n weight_martrix=tf.scan(_step,[ctx1,tf.transpose(ctx2,[0,2,1])],temp)\n weight_martrix_1=tf.exp(weight_martrix)*word_x2_mask[:,None,:]\n weight_martrix_2=tf.transpose(tf.exp(weight_martrix)*word_x1_mask[:,:,None],[0,2,1])\n weight_martrix_1=weight_martrix_1/tf.reduce_sum(weight_martrix_1,axis=2)[:,:,None]\n weight_martrix_2 = weight_martrix_2 / tf.reduce_sum(weight_martrix_2, axis=2)[:,:,None]\n\n ctx1_=tf.reduce_sum(weight_martrix_1[:,:,:,None]*ctx2[:,None,:,:],axis=2)\n ctx2_ = tf.reduce_sum(weight_martrix_2[:, :, :, None] * ctx1[:, None, :, :],axis=2)\n inp1=tf.concat([ctx1, ctx1_, ctx1*ctx1_, 
ctx1-ctx1_],axis=2)\n inp2 = tf.concat([ctx2, ctx2_, ctx2 * ctx2_, ctx2 - ctx2_], axis=2)\n params = param_init_fflayer(options, params, prefix='projection',\n nin=options['dim'] * 8, nout=options['dim'], ortho=False)\n\n\n s=tf.shape(inp1)\n inp1 = tf.nn.relu(tf.matmul(tf.reshape(inp1,[-1,int(inp1.shape[-1])]), params[_p('projection', 'W')]) + params[_p('projection', 'b')])\n inp1=tf.reshape(inp1,tf.concat([s[:2],[-1]],0))\n s=tf.shape(inp2)\n inp2 = tf.nn.relu(tf.matmul(tf.reshape(inp2,[-1,int(inp2.shape[-1])]), params[_p('projection', 'W')]) + params[_p('projection', 'b')])\n inp2=tf.reshape(inp2,tf.concat([s[:2],[-1]],0))\n if options['use_dropout']:\n inp1=tf.cond(use_noise,lambda :tf.nn.dropout(inp1,0.5),lambda :inp1)\n inp2 = tf.cond(use_noise, lambda: tf.nn.dropout(inp2, 0.5), lambda: inp2)\n\n\n for l in range(1):\n #param_init_lstm\n prefix = 'decoder_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim']\n else:\n nin = options['dim']+2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n #param_init_rlstm\n prefix = 'decoder_r_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim']\n else:\n nin = options['dim'] +2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n\n\n if l==0:\n ctx1=inp1\n ctx2=inp2\n else:\n ctx1=tf.concat([ctx1,inp1],axis=2)\n ctx2=tf.concat([ctx2,inp2],axis=2)\n\n print(ctx1)\n\n ctxr1=tf.reverse(ctx1,axis=[1])\n ctxr2=tf.reverse(ctx2,axis=[1])\n\n proj1=RNN_layer(ctx1,word_x1_mask,options,params,prefix='decoder_{}'.format(str(l+1)))\n projr1=RNN_layer(ctxr1,word_xr1_mask,options,params,prefix='decoder_r_{}'.format(str(l+1)))\n proj2=RNN_layer(ctx2,word_x2_mask,options,params,prefix='decoder_{}'.format(str(l+1)))\n projr2=RNN_layer(ctxr2,word_xr2_mask,options,params,prefix='decoder_r_{}'.format(str(l+1)))\n\n ctx1=tf.concat([proj1[0],projr1[0][::-1]],axis=len(projr1[0].shape)-1)\n ctx2 = tf.concat([proj2[0], projr2[0][::-1]], axis=len(projr2[0].shape) - 1)\n ctx1 = tf.transpose(ctx1, [1, 0, 2])\n ctx2 = tf.transpose(ctx2, [1, 0, 2])\n print(ctx1)\n\n mean_1=tf.reduce_sum(ctx1*word_x1_mask[:,:,None],axis=1)/tf.reduce_sum(word_x1_mask,axis=1)[:,None]\n max_1=tf.reduce_max(ctx1*word_x1_mask[:,:,None],axis=1)\n\n mean_2=tf.reduce_sum(ctx2*word_x2_mask[:,:,None],axis=1)/tf.reduce_sum(word_x2_mask,axis=1)[:,None]\n max_2=tf.reduce_max(ctx2*word_x2_mask[:,:,None],axis=1)\n\n #represention and MLP layer\n logit=tf.concat([mean_1,mean_2,max_1,max_2],axis=1)\n if options['use_dropout']:\n logit=tf.cond(use_noise,lambda :tf.nn.dropout(logit,0.5),lambda :logit)\n\n\n params = param_init_fflayer(options, params, prefix='ff_layer_1',\n nin=options['dim'] * 8, nout=options['dim'], 
ortho=False)\n params = param_init_fflayer(options, params, prefix='ff_layer_output',\n nin=options['dim'], nout=3, ortho=False)\n logit=tf.nn.tanh(tf.matmul(logit,params[_p('ff_layer_1','W')])+params[_p('ff_layer_1','b')])\n if options['use_dropout']:\n logit=tf.cond(use_noise,lambda :tf.nn.dropout(logit,0.5),lambda :logit)\n\n logit=tf.matmul(logit, params[_p('ff_layer_output', 'W')]) + params[_p('ff_layer_output', 'b')]\n probs=tf.nn.softmax(logit)\n pred=tf.argmax(probs,1)\n cost=tf.losses.sparse_softmax_cross_entropy(y,logit)\n return opt_ret,cost,pred,probs", "def test_char_lstm_word_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-lstm\"},\n \"params\": {\"emb_dim\": 5},\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n incorrect_config = {**config, \"params\": {**config[\"params\"], \"add_terminals\": True}}\n with pytest.raises(ValueError):\n model = ModelFactory.create_model_from_config(ModelConfig(**incorrect_config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n\n incorrect_config = {\n **config, \"params\": {**config[\"params\"], \"tokenizer_type\": \"char-tokenizer\"}}\n with pytest.raises(ValueError):\n model = ModelFactory.create_model_from_config(ModelConfig(**incorrect_config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n\n incorrect_config = {\n **config, \"params\": {\n **config[\"params\"], \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"bert-base-cased\"}\n }\n with pytest.raises(ValueError):\n model = ModelFactory.create_model_from_config(ModelConfig(**incorrect_config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"char_lstm_output_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n glove_config = {**config, \"params\": {\"embedder_type\": \"glove\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**glove_config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def compute_nsp(self, prev_layers=None, max_seq_len=None, tickersteps=0, tickerstep_nodes=False, **kwargs):\n incoming = self.incoming(prev_layers=prev_layers, comp_next_seq_pos=True, max_seq_len=max_seq_len,\n tickersteps=tickersteps, tickerstep_nodes=tickerstep_nodes, **kwargs)\n \n external_rec = None\n if self.external_rec is not None:\n external_rec = self.external_rec(prev_layers=prev_layers, max_seq_len=max_seq_len, tickersteps=tickersteps,\n tickerstep_nodes=tickerstep_nodes, comp_next_seq_pos=True,\n **kwargs)[:, -1, :]\n \n 
act = OrderedDict(zip_longest(self.lstm_inlets, [None]))\n \n with tf.variable_scope(self.name) as scope:\n # Make sure tensorflow can reuse the variable names\n scope.reuse_variables()\n \n # Handle restriction on maximum sequence length\n if max_seq_len is not None:\n incoming = incoming[:, :max_seq_len, :]\n \n #\n # Compute LSTM cycle at each sequence position in 'incoming'\n #\n \n # Loop through sequence positions and get corresponding net_fwds\n for seq_pos, net_fwd in self.comp_net_fwd(incoming):\n self.cur_net_fwd = net_fwd\n # Calculate net for recurrent connections at current sequence position\n if self.external_rec is None:\n net_bwd = dot_product(self.h[-1], self.W_bwd_conc)\n else:\n net_bwd = dot_product(external_rec, self.W_bwd_conc)\n \n # Sum up net from forward and recurrent connections\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=1, num_or_size_splits=4,\n value=net_fwd + net_bwd)\n \n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=1, num_or_size_splits=4,\n value=net_fwd + net_bwd)\n \n # peepholes could be added here #\n \n # Calculate activations\n if tickerstep_nodes and (self.W_tickers is not None):\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g] + self.W_tickers[g])\n for g in self.lstm_inlets]))\n else:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g])\n for g in self.lstm_inlets]))\n \n # Calculate new cell state\n if self.store_states:\n self.c.append(act['ci'] * act['ig'] + self.c[-1] * act['fg'])\n \n self.ig.append(act['ig'])\n self.og.append(act['og'])\n self.ci.append(act['ci'])\n self.fg.append(act['fg'])\n else:\n self.c[-1] = act['ci'] * act['ig'] + self.c[-1] * act['fg']\n \n self.ci[-1] = act['ci']\n self.og[-1] = act['og']\n self.ig[-1] = act['ig']\n self.fg[-1] = act['fg']\n \n # Calculate new output with new cell state\n if self.store_states:\n self.h.append(self.a['out'](self.c[-1]) * act['og'])\n else:\n self.h[-1] = self.a['out'](self.c[-1]) * act['og']\n \n # Process tickersteps\n for _ in enumerate(range(tickersteps)):\n # The forward net input during the ticker steps is 0 (no information is added anymore)\n # ticker_net_fwd = 0\n \n # Calculate net for recurrent connections at current sequence position\n if self.external_rec is None:\n net_bwd = dot_product(self.h[-1], self.W_bwd_conc)\n else:\n net_bwd = dot_product(external_rec, self.W_bwd_conc)\n \n # Split net from recurrent connections\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=1, num_or_size_splits=4, value=net_bwd)\n \n # Calculate activations including ticker steps\n if self.W_tickers is not None:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g] + self.W_tickers[g])\n for g in self.lstm_inlets]))\n else:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g])\n for g in self.lstm_inlets]))\n \n # Calculate new cell state\n if self.store_states:\n self.c.append(act['ci'] * act['ig'] + self.c[-1] * act['fg'])\n \n self.ig.append(act['ig'])\n self.og.append(act['og'])\n self.ci.append(act['ci'])\n self.fg.append(act['fg'])\n else:\n self.c[-1] = act['ci'] * act['ig'] + self.c[-1] * act['fg']\n \n self.ci[-1] = act['ci']\n self.og[-1] = act['og']\n self.ig[-1] = act['ig']\n self.fg[-1] = act['fg']\n \n # Calculate new output with new cell state\n if self.store_states:\n self.h.append(self.a['out'](self.c[-1]) * act['og'])\n else:\n self.h[-1] = self.a['out'](self.c[-1]) * act['og']", "def compute_nsp(self, prev_layers=None, max_seq_len=None, tickersteps=0, 
tickerstep_nodes=False, **kwargs):\n incoming = self.incoming(prev_layers=prev_layers, max_seq_len=max_seq_len, tickersteps=tickersteps,\n tickerstep_nodes=tickerstep_nodes, comp_next_seq_pos=True, **kwargs)\n \n external_rec = None\n if self.external_rec is not None:\n external_rec = self.external_rec(prev_layers=prev_layers, max_seq_len=max_seq_len, tickersteps=tickersteps,\n tickerstep_nodes=tickerstep_nodes, comp_next_seq_pos=True,\n **kwargs)[:, -1, :]\n \n with tf.variable_scope(self.name) as scope:\n \n act = OrderedDict(zip_longest(self.lstm_inlets, [None]))\n \n # Make sure tensorflow can reuse the variable names\n scope.reuse_variables()\n \n # Handle restriction on maximum sequence length\n if max_seq_len is not None:\n incoming = incoming[:, :max_seq_len, :]\n \n #\n # Compute LSTM cycle at each sequence position in 'incoming'\n #\n \n # Loop through sequence positions and get corresponding net_fwds\n for seq_pos, net_fwd in self.comp_net_fwd(incoming):\n # Get previous output/hidden state\n h_prev = self.h[-1]\n \n # Reduce nr of recurrent features\n if self.reduce_recurrents is not None:\n h_prev = self.reduce_recurrents(h_prev)\n \n # Calculate net for recurrent connections at current sequence position\n if self.external_rec is None:\n net_bwd = conv2d(h_prev, self.W_bwd_conc, dilation_rate=self.dilation_rate, name=\"net_bwd\")\n else:\n net_bwd = conv2d(external_rec, self.W_bwd_conc, dilation_rate=self.dilation_rate, name=\"net_bwd\")\n \n # Combine net from forward and recurrent connections\n if self.comb == 'mul':\n # TODO: Implement correct version of multiplication combination\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=3, num_or_size_splits=4,\n value=net_fwd * net_bwd, name=\"net_input\")\n elif self.comb == 'add':\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=3, num_or_size_splits=4,\n value=net_fwd + net_bwd, name=\"net_input\")\n else:\n raise ValueError(\"Combination method {} unknown\".format(self.comb))\n \n # Calculate activations\n if tickerstep_nodes and (self.W_tickers is not None):\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g] + self.W_tickers[g])\n for g in self.lstm_inlets]))\n else:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g])\n for g in self.lstm_inlets]))\n \n # ci, ig, og, fg = [self.a['ci'](ci + self.b['ci']), self.a['ig'](ig + self.b['ig']),\n # self.a['og'](og + self.b['og']), self.a['fg'](fg + self.b['fg'])]\n \n # Calculate new cell state\n if self.store_states:\n self.c.append(act['ci'] * act['ig'] + self.c[-1] * act['fg'])\n else:\n self.c[-1] = act['ci'] * act['ig'] + self.c[-1] * act['fg']\n \n # Calculate new output with new cell state\n if self.store_states:\n self.h.append(self.a['out'](self.c[-1]) * act['og'])\n else:\n self.h[-1] = self.a['out'](self.c[-1]) * act['og']\n \n # Process tickersteps\n for _ in enumerate(range(tickersteps)):\n # The forward net input during the ticker steps is 0 (no information is added anymore)\n # ticker_net_fwd = 0\n \n # Get previous output/hidden state\n h_prev = self.h[-1]\n \n # Reduce nr of recurrent features\n if self.reduce_recurrents is not None:\n h_prev = self.reduce_recurrents(h_prev)\n \n # Calculate net for recurrent connections at current sequence position\n if self.external_rec is None:\n net_bwd = conv2d(h_prev, self.W_bwd_conc, dilation_rate=self.dilation_rate, name=\"net_bwd\")\n else:\n net_bwd = conv2d(external_rec, self.W_bwd_conc, dilation_rate=self.dilation_rate, name=\"net_bwd\")\n \n # Combine net 
from forward and recurrent connections\n if self.comb == 'mul':\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=3, num_or_size_splits=4, value=net_bwd,\n name=\"net_input\")\n elif self.comb == 'add':\n act['ci'], act['ig'], act['og'], act['fg'] = tf.split(axis=3, num_or_size_splits=4, value=net_bwd,\n name=\"net_input\")\n else:\n raise ValueError(\"Combination method {} unknown\".format(self.comb))\n \n # Calculate activations including ticker steps\n if self.W_tickers is not None:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g] + self.W_tickers[g])\n for g in self.lstm_inlets]))\n else:\n act = OrderedDict(zip(self.lstm_inlets, [self.a[g](act[g] + self.b[g])\n for g in self.lstm_inlets]))\n \n # ci, ig, og, fg = [a_ci(ci + b_ci + W_tci), a_ig(ig + b_ig + W_tig), a_og(og + b_og + W_tog),\n # a_fg(fg + b_fg + W_tfg)]\n \n # Calculate new cell state\n if self.store_states:\n self.c.append(act['ci'] * act['ig'] + self.c[-1] * act['fg'])\n else:\n self.c[-1] = act['ci'] * act['ig'] + self.c[-1] * act['fg']\n \n # Calculate new output with new cell state\n if self.store_states:\n self.h.append(self.a['out'](self.c[-1]) * act['og'])\n else:\n self.h[-1] = self.a['out'](self.c[-1]) * act['og']", "def _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden):\n param_cells = []\n last_states = []\n for i in range(num_lstm_layer):\n param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable(\"l%d_i2h_weight\" % i),\n i2h_bias=mx.sym.Variable(\"l%d_i2h_bias\" % i),\n h2h_weight=mx.sym.Variable(\"l%d_h2h_weight\" % i),\n h2h_bias=mx.sym.Variable(\"l%d_h2h_bias\" % i)))\n state = LSTMState(c=mx.sym.Variable(\"l%d_init_c\" % i),\n h=mx.sym.Variable(\"l%d_init_h\" % i))\n last_states.append(state)\n assert len(last_states) == num_lstm_layer\n\n # embedding layer\n data = mx.sym.Variable('data')\n wordvec = mx.sym.SliceChannel(data=data, num_outputs=seq_len, squeeze_axis=1)\n\n hidden_all = []\n for seqidx in range(seq_len):\n hidden = wordvec[seqidx]\n for i in range(num_lstm_layer):\n next_state = _lstm(\n num_hidden=num_hidden,\n indata=hidden,\n prev_state=last_states[i],\n param=param_cells[i],\n seqidx=seqidx,\n layeridx=i)\n hidden = next_state.h\n last_states[i] = next_state\n hidden_all.append(hidden)\n\n hidden_concat = mx.sym.Concat(*hidden_all, dim=0)\n pred_fc = mx.sym.FullyConnected(data=hidden_concat, num_hidden=11, name=\"pred_fc\")\n return pred_fc", "def test_glove_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-pytorch\"},\n \"params\": {\"embedder_type\": \"glove\"},\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def FirstLayer(net, l2_reg_val, is_training):\n\n # Intact\n # global VARS\n # l2_reg = tf.contrib.layers.l2_regularizer(l2_reg_val)\n # VARS['1'] = net\n # net = tf.contrib.layers.fully_connected(\n # net, 40, activation_fn=None, weights_regularizer=l2_reg)\n # 
VARS['2'] = net\n # net = tf.nn.relu(net)\n # VARS['3'] = net # etc\n\n ## keep net for Bonus part test\n batch_size, number_of_vocabulary_tokens = net.shape\n net_input = tf.placeholder(tf.float32, [None, number_of_vocabulary_tokens], name='net_input')\n net_input = 1 * net # to make a copy\n\n ### ME\n net_norm = tf.nn.l2_normalize(net, axis=0) # ME Preprocess the layer input\n # net = tf.contrib.layers.fully_connected(net, 40, activation_fn=tf.nn.tanh,\n # weights_regularizer=l2_weighted_regularizer_(scale=l2_reg_val, net_=net),\n # normalizer_fn=tf.contrib.layers.batch_norm, scope=\"fc1\") # ME If normalizer_fn is given no bias is added\n # net = tf.layers.dense(net_norm, units=40, activation=None, use_bias=False,\n # kernel_regularizer=l2_weighted_regularizer_(scale=l2_reg_val, net_=net_norm), name=\"fc1\")\n # net = tf.layers.dense(net_norm, units=40, activation=None, use_bias=False, name=\"fc1\")\n net = tf.contrib.layers.fully_connected(net_norm, 40, activation_fn=None,\n normalizer_fn=None, biases_initializer=None, scope=\"fc1\")\n y = tf.trainable_variables()[0]\n # net_inside = tf.matmul(net_norm, y)\n # reg_loss_ = l2_reg_val * tf.nn.l2_loss(net_inside)\n # tf.losses.add_loss(reg_loss_, loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)\n tf.losses.add_loss(l2_reg_val * tf.nn.l2_loss(net), loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)\n net = tf.nn.tanh(net) # ME\n net = tf.contrib.layers.batch_norm(net, is_training=is_training) # ME\n # net = tf.layers.batch_normalization(net, training=is_training)\n # tf.losses.add_loss(l2_reg_val*tf.math.square(tf.norm(net*net)), loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES) # Y=?\n\n ## from scratch\n # batch_size, number_of_vocabulary_tokens = net.shape\n # net = tf.nn.l2_normalize(net, axis=0) # ME Preprocess the layer input\n # # weights = tf.Variable(tf.truncated_normal([number_of_vocabulary_tokens._value, 40]), name=\"w_fc1\")\n # net_in = tf.matmul(net, weights)\n # net = tf.nn.tanh(net_in)\n # net = tf.contrib.layers.batch_norm(net, is_training=is_training)\n # reg_loss_ = l2_reg_val * tf.nn.l2_loss(net_in)\n # tf.losses.add_loss(reg_loss_, loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)\n\n # Bonus local test\n # batch_size, number_of_vocabulary_tokens = net.shape\n # net_example = tf.multinomial(tf.log([[400., 1.]]), number_of_vocabulary_tokens)\n # net_example = tf.constant(numpy.random.binomial(1, .1, (3,number_of_vocabulary_tokens)), dtype='int32')\n # net_example = tf.placeholder(tf.float32, [None, number_of_vocabulary_tokens], name='x_example')\n # var_ = tf.Variable(tf.truncated_normal([number_of_vocabulary_tokens._value, 40]), name=\"w_fc1\")\n tmp = EmbeddingL2RegularizationUpdate(y, net_input, .005, l2_reg_val)\n tmp = EmbeddingL1RegularizationUpdate(y, net_input, .005, l2_reg_val)\n\n return net", "def create_CNN_LSTM_model(feats2d, shapes, model_settings, is_training):\n\n if is_training:\n dropout_prob = model_settings['dropout_prob']\n\n # Input Layer\n shape = tf.shape(feats2d) # features are of shape [max seq length for batch, 40]\n input_layer = tf.reshape(feats2d,[-1, shape[1], model_settings['feature_width']]) # [batch_size, seq_length, 40]\n\n # Convolutional Layer #1 (Dropout #1) and Pooling Layer #1\n conv1 = tf.layers.conv1d(\n inputs=input_layer,\n filters=model_settings['conv1_num_filters'],\n kernel_size=model_settings['conv1_kernel_size'],\n padding=\"same\",\n activation=tf.nn.relu)\n\n dropout1 = tf.layers.dropout(\n inputs=conv1, rate=dropout_prob, training=is_training)\n\n pool1 = 
tf.layers.max_pooling1d(inputs=dropout1, pool_size=model_settings['pool1_pool_size'], strides=model_settings['pool1_strides']) \n\n # Convolutional Layer #2 (Dropout #2) and Pooling Layer #2\n conv2 = tf.layers.conv1d(\n inputs=pool1,\n filters=model_settings['conv2_num_filters'],\n kernel_size=model_settings['conv2_kernel_size'],\n padding=\"same\",\n activation=tf.nn.relu)\n\n dropout2 = tf.layers.dropout(\n inputs=conv2, rate=dropout_prob, training=is_training)\n\n pool2 = tf.layers.max_pooling1d(inputs=dropout2, pool_size=model_settings['pool2_pool_size'], strides=model_settings['pool2_strides']) # [batch_size, pool2_shape[1], 64]\n\n # in case we want to use a flat output layer from convolutions\n # pool2_flat = tf.layers.flatten(pool2) # [batch_size, pool2_shape[1] * 64]\n # idem as: \n # pool2_shape = tf.shape(pool2) \n # pool2_flat = tf.reshape(pool2, [-1, pool2_shape[1] * 64]) \n\n # Get dimensions\n lstm_size = model_settings['lstm_size']\n\n # batch_size = tf.shape(feats2d)[0] \n # feats2d = tf.reshape(feats2d, shape=[batch_size,-1,40]) # features are of shape [max seq length for batch, 40]\n # seq_lengths = shapes[:,0] # all shapes are [seq_length, 40], we extract seq_length\n # seq_lengths = tf.shape(pool2)[1]\n # seq_lengths = tf.slice(shapes, [0, 0], [batch_size, 1])\n # print(seq_lengths)\n\n # LSTM cells\n cell_fw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n # ini_fw = cell_fw.zero_state(batch_size,dtype=tf.float32)\n # ini_bw = cell_bw.zero_state(batch_size,dtype=tf.float32)\n\n # Bi-directional RNN (+ Dropout)\n (output_fw, output_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, pool2, \n dtype=tf.float32)\n\n # initial_state_fw = ini_fw, initial_state_bw = ini_bw, \n # if state_is_tuple, state is a tuple (cell_state, memory_state)\n concat_rnn = tf.concat([state_fw[0], state_bw[0]], axis=1)\n\n if is_training:\n first_dropout = tf.nn.dropout(concat_rnn, dropout_prob)\n else:\n first_dropout = concat_rnn\n\n # Logits Layer\n num_classes = model_settings['num_classes']\n logits = tf.layers.dense(inputs=first_dropout, units=num_classes)\n \n if is_training:\n return logits, dropout_prob\n else:\n return logits", "def train_network(\n model_name, batch_size=64, epochs=100, num_units=64, sequence_length=100\n):\n network_input, network_output, vocab_size = get_train_data(sequence_length)\n print(\"vocab_size = \", vocab_size)\n\n test_files = list()\n for test_file in test_dir.glob(\"*.pkl\"):\n if not test_file.stem.startswith(\".\"):\n test_files.append(str(test_file))\n\n test_in, test_out = prepare_validation_data(test_files, sequence_length)\n\n # num_units = 256\n if model_name == \"lstm\":\n model = SingleLSTM(num_units, vocab_size).get_network(\n sequence_length=sequence_length\n )\n elif model_name == \"bi-lstm\":\n model = BiLSTM(num_units, vocab_size).get_network(\n sequence_length=sequence_length\n )\n elif model_name == \"lstm-attention\":\n model = AttentionLSTM(num_units, vocab_size).get_network(\n sequence_length=sequence_length\n )\n elif model_name == \"bi-lstm-attention\":\n model = AttentionBiLSTM(num_units, vocab_size).get_network(\n sequence_length=sequence_length\n )\n\n filename = (\n model_name + \"/model-{epoch:02d}-{loss:.4f}.hdf5\"\n ) # type(model).__name__ +\n file_path = str(model_dir / filename)\n checkpoint = ModelCheckpoint(\n file_path,\n monitor=\"loss\",\n verbose=0,\n save_best_only=True,\n save_weights_only=True,\n 
mode=\"min\",\n )\n model.fit(\n network_input,\n network_output,\n epochs=epochs,\n batch_size=batch_size,\n callbacks=[checkpoint],\n validation_data=(test_in, test_out),\n )", "def run_epoch_test(session, model, verbose=False):\n # fetches = {\"ms\": model.dynamic_eval.global_ms()}\n # vals = session.run(fetches)\n # ms = vals[\"ms\"]\n # s = np.sum(np.sqrt([x for x in ms]))\n # print(s)\n\n\n\n start_time = time.time()\n losses = 0.0\n iters = 0\n\n # zeros initial state for all devices\n state = []\n for k in range(model.gpu_num):\n state.append(session.run(model.initial_state(k)))\n\n # evaluate loss and final state for all devices\n fetches = {\"loss\": model.loss}\n\n if config.dynamic_eval:\n fetches[\"update_op\"] = model.dynamic_eval.update_op()\n\n\n for k in range(model.gpu_num):\n fetches[\"final_state%d\" % k] = model.final_state(k)\n\n for step in range(model.input.epoch_size):\n # pass states between time batches\n feed_dict = {}\n for i in range(model.gpu_num):\n gpu_state = model.initial_state(i)\n for j, (c, h) in enumerate(gpu_state):\n feed_dict[c] = state[i][j].c\n feed_dict[h] = state[i][j].h\n\n vals = session.run(fetches, feed_dict)\n\n loss = vals[\"loss\"]\n\n for k in range(model.gpu_num):\n state[k] = vals[\"final_state%d\" % k]\n\n losses += loss\n iters += model.input.time_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 0:\n print(\"%.3f perplexity: %.3f bits: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(losses / iters), np.log2(np.exp(losses / iters)),\n iters * model.input.batch_size / (time.time() - start_time)))\n\n return np.exp(losses / iters)", "def test(self):\n with torch.no_grad():\n self.model.eval()\n p10_forecast, p10_forecast, p90_forecast, target = None, None, None, None\n\n t = time()\n for step, sample in enumerate(self.test_loader):\n\n # Hide future predictions from input vector, set to 0 (or 1) values where timestep > encoder_steps\n steps = self.cnf.all_params['num_encoder_steps']\n pred_len = sample['outputs'].shape[1]\n x = sample['inputs'].float().to(self.cnf.device)\n x[:, steps:, 0] = 1\n\n # Feed input to the model\n if self.cnf.all_params[\"model\"] == \"transformer\" or self.cnf.all_params[\"model\"] == \"grn_transformer\":\n\n # Auto-regressive prediction\n for i in range(pred_len):\n output = self.model.forward(x)\n x[:, steps + i, 0] = output[:, i, 1]\n output = self.model.forward(x)\n\n elif self.cnf.all_params[\"model\"] == \"tf_transformer\":\n output, _, _ = self.model.forward(x)\n else:\n raise NameError\n\n output = output.squeeze()\n y, y_pred = sample['outputs'].squeeze().float().to(self.cnf.device), output\n\n # Compute loss\n loss, _ = self.loss(y_pred, y)\n smape = symmetric_mean_absolute_percentage_error(output[:, :, 1].detach().cpu().numpy(),\n sample['outputs'][:, :, 0].detach().cpu().numpy())\n\n # De-Normalize to compute metrics\n target = unnormalize_tensor(self.data_formatter, y, sample['identifier'][0][0])\n p10_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 0], sample['identifier'][0][0])\n p50_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 1], sample['identifier'][0][0])\n p90_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 2], sample['identifier'][0][0])\n\n # Compute metrics\n self.test_losses['p10'].append(self.loss.numpy_normalised_quantile_loss(p10_forecast, target, 0.1))\n self.test_losses['p50'].append(self.loss.numpy_normalised_quantile_loss(p50_forecast, target, 0.5))\n 
self.test_losses['p90'].append(self.loss.numpy_normalised_quantile_loss(p90_forecast, target, 0.9))\n\n self.test_loss.append(loss.item())\n self.test_smape.append(smape)\n\n # Plot serie prediction\n p1, p2, p3, target = np.expand_dims(p10_forecast, axis=-1), np.expand_dims(p50_forecast, axis=-1), \\\n np.expand_dims(p90_forecast, axis=-1), np.expand_dims(target, axis=-1)\n p = np.concatenate((p1, p2, p3), axis=-1)\n plot_temporal_serie(p, target)\n\n # Log stuff\n for k in self.test_losses.keys():\n mean_test_loss = np.mean(self.test_losses[k])\n print(f'\\t● AVG {k} Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n\n # log log log\n mean_test_loss = np.mean(self.test_loss)\n mean_smape = np.mean(self.test_smape)\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n print(f'\\t● AVG SMAPE on TEST-set: {mean_smape:.6f} │ T: {time() - t:.2f} s')", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=300,num_hidden_units_2=200,num_code_units=50):\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(\"Tetrode number: {}, Num outputs: {}\".format(tetrode_number,dataset['output_dim']))\n\n print(dataset['input_shape'])\n print(dataset['output_dim'])\n \n print(\"Making the model...\")\n network = model(dataset['input_shape'],dataset['output_dim'],num_hidden_units,num_hidden_units_2,num_code_units,(4,1))\n print(\"Done!\")\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n accuracies = []\n trainvalidation = []\n\n print(\"Begining to train the network...\")\n epochsDone = 0\n autoencoderSameLabels = []\n try:\n for i in range(NUM_EPOCHS):\n costs = []\n valid_costs = []\n\n for start, end in zip(range(0, dataset['num_examples_train'], BATCH_SIZE), range(BATCH_SIZE, dataset['num_examples_train'], BATCH_SIZE)):\n cost = training['train'](dataset['X_train'][start:end],dataset['y_train'][start:end])\n costs.append(cost)\n \n for start, end in zip(range(0, dataset['num_examples_valid'], BATCH_SIZE), range(BATCH_SIZE, dataset['num_examples_valid'], BATCH_SIZE)):\n cost = training['valid'](dataset['X_valid'][start:end],dataset['y_valid'][start:end])\n valid_costs.append(cost)\n\n\n meanValidCost = np.mean(np.asarray(valid_costs),dtype=np.float32) \n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Accuracy: {}, Training cost / validation cost: {}\".format(i+1,accuracy,meanTrainCost/meanValidCost))\n\n if(np.isnan(meanTrainCost/meanValidCost)):\n print(\"Nan value\")\n break\n\n\n # this is the test to see if the autoencoder is learning how to \n if i%10==0:\n acs = []\n for j in range(dataset['caswells_dim']):\n # print(dataset['labeled_test'][j].shape)\n codes = training['code'](dataset['labeled_test'][j])\n np.mean(np.argmax(dataset['y_test'], axis=1) == np.argmax(training['predict'](dataset['X_test']), axis=1))\n format_codes = []\n for code in codes:\n # if(j==0):\n format_codes.append(np.argmax(code))\n\n prev = sorted(format_codes)[0]\n # print(sorted(format_codes))\n k = 0\n same = [1]\n for code in sorted(format_codes)[1:]:\n if(code == prev):\n same[k] = same[k] + 1\n else:\n k+=1\n same.append(1)\n prev = code\n\n same = np.asarray(same)\n # print(same,np.argmax(same),same[np.argmax(same)],np.sum(same))\n label_acc = same[np.argmax(same)]*1.0/np.sum(same)\n acs.append(label_acc)\n print(\"Label: {}, Num examples: {}, Same label 
with autoencoder: {} \".format(j,dataset['labeled_test'][j].shape[0],label_acc))\n acs = np.asarray(acs)\n autoencoderSameLabels.append(np.mean(acs))\n print(\"Average agreement: {}\".format(np.mean(acs)))\n\n\n if i%50 == 0:\n ran = randint(0,dataset['num_examples_test']-20)\n now = datetime.datetime.now()\n for j in range(10):\n testing = [dataset['X_test'][ran]]\n # print(testing[0].shape)\n output = dataset['y_test'][ran].reshape((1, 200))[0]\n print(output)\n\n # print(np.arange(dataset['output_dim']))\n # print(output)\n prediction = training['predict'](testing)[0].reshape((1, 200))[0]\n print(prediction)\n # print(prediction)\n # print(testing[0][0])\n \n code = training['code'](testing).reshape((1, 50))\n\n # print(code)\n \n # plotting the figure\n\n fig = plt.figure(1)\n sub1 = fig.add_subplot(311)\n sub2 = fig.add_subplot(312)\n sub3 = fig.add_subplot(313)\n\n # add titles\n\n sub1.set_title('Desired output')\n sub2.set_title('Net output')\n sub3.set_title('Code layer output')\n\n # adding x labels\n\n sub1.set_xlabel('Time')\n sub2.set_xlabel('Time')\n sub3.set_xlabel('Code label')\n\n # adding y labels\n\n sub1.set_ylabel('Amplitude')\n sub2.set_ylabel('Amplitude')\n sub3.set_ylabel('Probability')\n\n # Plotting data\n\n # print(testing[0][0])\n # inp = []\n # for z in range(4):\n # inp += list(testing[0][0][z])\n\n\n sub1.plot(output)\n # sub1.bar(x_axis, output, width=1)\n sub1.grid(True)\n\n sub2.plot(prediction)\n sub2.grid(True)\n\n x_axis = list(np.arange(len(code[0])))\n\n # sub3.plot(code[0])\n sub3.bar(x_axis, code[0], width=1)\n # plt.show()\n\n fig.tight_layout()\n\n # plt.plot(var2)\n # fig.tight_layout()\n plt.savefig('../logs/convAuto/fig{}_{}_{}.png'.format(i,j,now), bbox_inches='tight')\n plt.close()\n \n ran += 1\n # break\n\n\n trainvalidation.append([meanTrainCost,meanValidCost])\n accuracies.append(accuracy)\n if(EARLY_STOPPING):\n if(len(accuracies) < STOPPING_RANGE):\n pass\n else:\n test = [k for k in accuracies if k < accuracy]\n if not test:\n print('Early stopping causing training to finish at epoch {}'.format(i+1))\n break\n del accuracies[0]\n accuracies.append(accuracy)\n\n epochsDone = epochsDone + 1\n\n except KeyboardInterrupt:\n pass\n\n # plt.plot(trainvalidation)\n # plt.show()\n\n if(LOG_EXPERIMENT):\n print(\"Logging the experiment details...\")\n log = dict(\n NET_TYPE = \"Conv auto encoder 2 hidden 1 code\",\n TETRODE_NUMBER = tetrode_number,\n BASENAME = BASENAME,\n NUM_EPOCHS = epochsDone,\n BATCH_SIZE = BATCH_SIZE,\n TRAIN_VALIDATION = trainvalidation,\n LEARNING_RATE = LEARNING_RATE,\n MOMENTUM = MOMENTUM,\n SAME_LABEL_AVERAGES = autoencoderSameLabels,\n ACCURACY = accuracies,\n NETWORK_LAYERS = [str(type(layer)) for layer in lasagne.layers.get_all_layers(network)],\n OUTPUT_DIM = dataset['output_dim'],\n # NETWORK_PARAMS = lasagne.layers.get_all_params_values(network)\n )\n now = datetime.datetime.now()\n filename = \"experiments/convAuto/{}_{}_{}_NUMLAYERS_{}_OUTPUTDIM_{}\".format(now,NUM_EPOCHS,NUM_HIDDEN_UNITS,len(log['NETWORK_LAYERS']),log['OUTPUT_DIM'])\n filename = re.sub(\"[^A-Za-z0-9_/,-:]\", \"\", filename)\n with open(filename,\"w\") as outfile:\n outfile.write(str(log))", "def make_attention_lstm():\n from tensorflow.keras import activations\n from tensorflow.keras import backend as K\n from tensorflow.keras import constraints, initializers, regularizers\n\n # from keras.legacy import interfaces\n from tensorflow.keras.layers import RNN, InputSpec, Layer\n\n def _time_distributed_dense(\n x,\n w,\n b=None,\n 
dropout=None,\n input_dim=None,\n output_dim=None,\n timesteps=None,\n training=None,\n ):\n \"\"\"Apply `y . w + b` for every temporal slice y of x.\n\n # Arguments\n x: input tensor.\n w: weight matrix.\n b: optional bias vector.\n dropout: wether to apply dropout (same dropout mask\n for every temporal slice of the input).\n input_dim: integer; optional dimensionality of the input.\n output_dim: integer; optional dimensionality of the output.\n timesteps: integer; optional number of timesteps.\n training: training phase tensor or boolean.\n # Returns\n Output tensor.\n \"\"\"\n if not input_dim:\n input_dim = K.shape(x)[2]\n if not timesteps:\n timesteps = K.shape(x)[1]\n if not output_dim:\n output_dim = K.int_shape(w)[1]\n\n if dropout is not None and 0.0 < dropout < 1.0:\n # apply the same dropout pattern at every timestep\n ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))\n dropout_matrix = K.dropout(ones, dropout)\n expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)\n x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)\n\n # collapse time dimension and batch dimension together\n x = K.reshape(x, (-1, input_dim))\n x = K.dot(x, w)\n if b is not None:\n x = K.bias_add(x, b)\n # reshape to 3D tensor\n if K.backend() == \"tensorflow\":\n x = K.reshape(x, K.stack([-1, timesteps, output_dim]))\n x.set_shape([None, None, output_dim])\n else:\n x = K.reshape(x, (-1, timesteps, output_dim))\n return x\n\n class AttentionLSTMCell(Layer):\n \"\"\"Long-Short Term Memory unit - with Attention.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](keras/activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](keras/activations.md)).\n attention_activation: Activation function to use\n for the attention step. If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n (see [activations](keras/activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n attention_initializer: Initializer for the `attention_kernel` weights\n matrix, used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n use_chrono_initialization: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.]\n (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n attention_regularizer: Regularizer function applied to\n the `attention_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n attention_constraint: Constraint function applied to\n the `attention_kernel` weights matrix\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n return_attention: Returns the attention vector instead of\n the internal state.\n # References\n - [Long short-term memory]\n (http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)\n (original 1997 paper)\n - [Learning to forget: Continual prediction with LSTM]\n (http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)\n - [Supervised sequence labeling with recurrent neural networks]\n (http://www.cs.toronto.edu/~graves/preprint.pdf)\n - [A Theoretically Grounded Application of Dropout\n in Recurrent Neural Networks]\n (http://arxiv.org/abs/1512.05287)\n - [Bahdanau, Cho & Bengio (2014),\n \"Neural Machine Translation by Jointly Learning to Align and Translate\"]\n (https://arxiv.org/pdf/1409.0473.pdf)\n - [Xu, Ba, Kiros, Cho, Courville, Salakhutdinov, Zemel & Bengio (2016)\n \"Show, Attend and Tell: Neural Image Caption Generation\n with Visual Attention\"]\n (http://arxiv.org/pdf/1502.03044.pdf)\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n def __init__(\n self,\n units,\n activation=\"tanh\",\n recurrent_activation=\"hard_sigmoid\",\n 
attention_activation=\"tanh\",\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n recurrent_initializer=\"orthogonal\",\n attention_initializer=\"orthogonal\",\n bias_initializer=\"zeros\",\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n attention_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n attention_constraint=None,\n dropout=0.0,\n recurrent_dropout=0.0,\n return_attention=False,\n implementation=1,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.input_spec = [InputSpec(ndim=2)]\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.attention_activation = activations.get(attention_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.attention_initializer = initializers.get(attention_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_forget_bias = unit_forget_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.attention_regularizer = regularizers.get(attention_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.attention_constraint = constraints.get(attention_constraint)\n\n self.dropout = min(1.0, max(0.0, dropout))\n self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))\n self.return_attention = return_attention\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n self.implementation = implementation\n self.state_spec = [\n InputSpec(shape=(None, self.units)),\n InputSpec(shape=(None, self.units)),\n ]\n self.state_size = (self.units, self.units)\n\n def build(self, input_shape):\n \"\"\"Build the AttentionLSTMCell object.\"\"\"\n if hasattr(self, \"timesteps\") and self.timesteps is not None:\n self.timestep_dim = self.timesteps\n else:\n self.timestep_dim = 1 # input_shape[0]\n\n self.input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"kernel\",\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n )\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 4),\n name=\"recurrent_kernel\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n # add attention kernel\n self.attention_kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"attention_kernel\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n # add attention weights\n # weights for attention model\n self.attention_weights = self.add_weight(\n shape=(self.input_dim, self.units),\n name=\"attention_W\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n self.attention_recurrent_weights = self.add_weight(\n 
shape=(self.units, self.units),\n name=\"attention_U\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n if self.use_bias:\n if self.unit_forget_bias:\n\n def bias_initializer(shape, *args, **kwargs):\n return K.concatenate(\n [\n self.bias_initializer((self.units,), *args, **kwargs),\n initializers.Ones()((self.units,), *args, **kwargs),\n self.bias_initializer(\n (self.units * 2,), *args, **kwargs\n ),\n ]\n )\n\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(\n shape=(self.units * 4,),\n name=\"bias\",\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_bias = self.add_weight(\n shape=(self.units,),\n name=\"attention_b\",\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_recurrent_bias = self.add_weight(\n shape=(self.units, 1),\n name=\"attention_v\",\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n else:\n self.bias = None\n self.attention_bias = None\n self.attention_recurrent_bias = None\n\n self.kernel_i = self.kernel[:, : self.units]\n self.kernel_f = self.kernel[:, self.units : self.units * 2]\n self.kernel_c = self.kernel[:, self.units * 2 : self.units * 3]\n self.kernel_o = self.kernel[:, self.units * 3 :]\n\n self.recurrent_kernel_i = self.recurrent_kernel[:, : self.units]\n self.recurrent_kernel_f = self.recurrent_kernel[\n :, self.units : self.units * 2\n ]\n self.recurrent_kernel_c = self.recurrent_kernel[\n :, self.units * 2 : self.units * 3\n ]\n self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3 :]\n\n self.attention_i = self.attention_kernel[:, : self.units]\n self.attention_f = self.attention_kernel[:, self.units : self.units * 2]\n self.attention_c = self.attention_kernel[:, self.units * 2 : self.units * 3]\n self.attention_o = self.attention_kernel[:, self.units * 3 :]\n\n if self.use_bias:\n self.bias_i = self.bias[: self.units]\n self.bias_f = self.bias[self.units : self.units * 2]\n self.bias_c = self.bias[self.units * 2 : self.units * 3]\n self.bias_o = self.bias[self.units * 3 :]\n else:\n self.bias_i = None\n self.bias_f = None\n self.bias_c = None\n self.bias_o = None\n\n self.built = True\n\n def _generate_dropout_mask(self, inputs, training=None):\n if 0 < self.dropout < 1:\n ones = K.ones_like(K.squeeze(inputs[:, 0:1, :], axis=1))\n\n def dropped_inputs():\n return K.dropout(ones, self.dropout)\n\n self._dropout_mask = [\n K.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(4)\n ]\n else:\n self._dropout_mask = None\n\n def _generate_recurrent_dropout_mask(self, inputs, training=None):\n if 0 < self.recurrent_dropout < 1:\n ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))\n ones = K.tile(ones, (1, self.units))\n\n def dropped_inputs():\n return K.dropout(ones, self.dropout)\n\n self._recurrent_dropout_mask = [\n K.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(4)\n ]\n else:\n self._recurrent_dropout_mask = None\n\n def call(self, inputs, states, training=None):\n \"\"\"Call the AttentionLSTMCell.\"\"\"\n # dropout matrices for input units\n dp_mask = self._dropout_mask\n # dropout matrices for recurrent units\n rec_dp_mask = self._recurrent_dropout_mask\n\n h_tm1 = states[0] # previous memory state\n c_tm1 = states[1] # previous carry state\n\n # 
alignment model\n h_att = K.repeat(h_tm1, self.timestep_dim)\n att = _time_distributed_dense(\n inputs,\n self.attention_weights,\n self.attention_bias,\n input_dim=self.input_dim,\n output_dim=self.units,\n timesteps=self.timestep_dim,\n )\n attention_ = self.attention_activation(\n K.dot(h_att, self.attention_recurrent_weights) + att\n ) # energy\n attention_ = K.squeeze(\n K.dot(attention_, self.attention_recurrent_bias), 2\n ) # energy\n\n alpha = K.exp(attention_)\n\n if dp_mask is not None:\n alpha *= dp_mask[0]\n\n alpha /= K.sum(alpha, axis=1, keepdims=True)\n alpha_r = K.repeat(alpha, self.input_dim)\n alpha_r = K.permute_dimensions(alpha_r, (0, 2, 1))\n\n # make context vector (soft attention after Bahdanau et al.)\n z_hat = inputs * alpha_r\n # context_sequence = z_hat\n z_hat = K.sum(z_hat, axis=1)\n\n if self.implementation == 1:\n if 0 < self.dropout < 1.0:\n inputs_i = inputs * dp_mask[0]\n inputs_f = inputs * dp_mask[1]\n inputs_c = inputs * dp_mask[2]\n inputs_o = inputs * dp_mask[3]\n else:\n inputs_i = inputs\n inputs_f = inputs\n inputs_c = inputs\n inputs_o = inputs\n x_i = K.dot(inputs_i, self.kernel_i)\n x_f = K.dot(inputs_f, self.kernel_f)\n x_c = K.dot(inputs_c, self.kernel_c)\n x_o = K.dot(inputs_o, self.kernel_o)\n if self.use_bias:\n x_i = K.bias_add(x_i, self.bias_i)\n x_f = K.bias_add(x_f, self.bias_f)\n x_c = K.bias_add(x_c, self.bias_c)\n x_o = K.bias_add(x_o, self.bias_o)\n\n if 0 < self.recurrent_dropout < 1.0:\n h_tm1_i = h_tm1 * rec_dp_mask[0]\n h_tm1_f = h_tm1 * rec_dp_mask[1]\n h_tm1_c = h_tm1 * rec_dp_mask[2]\n h_tm1_o = h_tm1 * rec_dp_mask[3]\n else:\n h_tm1_i = h_tm1\n h_tm1_f = h_tm1\n h_tm1_c = h_tm1\n h_tm1_o = h_tm1\n i = self.recurrent_activation(\n x_i\n + K.dot(h_tm1_i, self.recurrent_kernel_i)\n + K.dot(z_hat, self.attention_i)\n )\n f = self.recurrent_activation(\n x_f\n + K.dot(h_tm1_f, self.recurrent_kernel_f)\n + K.dot(z_hat, self.attention_f)\n )\n c = f * c_tm1 + i * self.activation(\n x_c\n + K.dot(h_tm1_c, self.recurrent_kernel_c)\n + K.dot(z_hat, self.attention_c)\n )\n o = self.recurrent_activation(\n x_o\n + K.dot(h_tm1_o, self.recurrent_kernel_o)\n + K.dot(z_hat, self.attention_o)\n )\n else:\n if 0.0 < self.dropout < 1.0:\n inputs *= dp_mask[0]\n z = K.dot(inputs, self.kernel)\n if 0.0 < self.recurrent_dropout < 1.0:\n h_tm1 *= rec_dp_mask[0]\n z += K.dot(h_tm1, self.recurrent_kernel)\n z += K.dot(z_hat, self.attention_kernel)\n\n if self.use_bias:\n z = K.bias_add(z, self.bias)\n\n z0 = z[:, : self.units]\n z1 = z[:, self.units : 2 * self.units]\n z2 = z[:, 2 * self.units : 3 * self.units]\n z3 = z[:, 3 * self.units :]\n\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3)\n\n h = o * self.activation(c)\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None:\n h._uses_learning_phase = True\n return h, [h, c]\n\n class AttentionLSTM(RNN):\n \"\"\"Long-Short Term Memory unit - with Attention.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](keras/activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](keras/activations.md)).\n attention_activation: Activation function to use\n for the attention step. If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n (see [activations](keras/activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n attention_initializer: Initializer for the `attention_kernel` weights\n matrix, used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n use_chrono_initialization: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.]\n (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n attention_regularizer: Regularizer function applied to\n the `attention_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n attention_constraint: Constraint function applied to\n the `attention_kernel` weights matrix\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n return_attention: Returns the attention vector instead of\n the internal state.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n # References\n - [Long short-term memory]\n (http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)\n (original 1997 paper)\n - [Learning to forget: Continual prediction with LSTM]\n (http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)\n - [Supervised sequence labeling with recurrent neural networks]\n (http://www.cs.toronto.edu/~graves/preprint.pdf)\n - [A Theoretically Grounded Application of Dropout\n in Recurrent Neural Networks]\n (http://arxiv.org/abs/1512.05287)\n - [Bahdanau, Cho & Bengio (2014)\n \"Neural Machine Translation by Jointly Learning to Align and Translate\"]\n (https://arxiv.org/pdf/1409.0473.pdf)\n - [Xu, Ba, Kiros, Cho, Courville, Salakhutdinov, Zemel & Bengio (2016)\n \"Show, Attend and Tell: Neural Image Caption Generation\n with Visual Attention\"]\n (http://arxiv.org/pdf/1502.03044.pdf)\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n # '@interfaces.legacy_recurrent_support\n def __init__(\n self,\n units,\n activation=\"tanh\",\n recurrent_activation=\"hard_sigmoid\",\n attention_activation=\"tanh\",\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n recurrent_initializer=\"orthogonal\",\n attention_initializer=\"orthogonal\",\n bias_initializer=\"zeros\",\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n attention_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n attention_constraint=None,\n dropout=0.0,\n recurrent_dropout=0.0,\n implementation=1,\n return_sequences=False,\n return_state=False,\n return_attention=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs,\n ):\n import warnings\n\n if implementation == 0:\n warnings.warn(\n \"`implementation=0` has been deprecated, \"\n \"and now defaults to `implementation=1`.\"\n \"Please update your layer call.\",\n stacklevel=2,\n )\n implementation = 1\n\n if K.backend() == \"cntk\":\n if not kwargs.get(\"unroll\") and (dropout > 0 or recurrent_dropout > 0):\n warnings.warn(\n \"RNN dropout is not supported with the CNTK backend \"\n \"when using dynamic RNNs (i.e. non-unrolled). 
\"\n \"You can either set `unroll=True`, \"\n \"set `dropout` and `recurrent_dropout` to 0, \"\n \"or use a different backend.\",\n stacklevel=2,\n )\n dropout = 0.0\n recurrent_dropout = 0.0\n\n cell = AttentionLSTMCell(\n units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n attention_activation=attention_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n attention_initializer=attention_initializer,\n bias_initializer=bias_initializer,\n unit_forget_bias=unit_forget_bias,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n attention_regularizer=attention_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n attention_constraint=attention_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n return_attention=return_attention,\n implementation=implementation,\n )\n super().__init__(\n cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs,\n )\n self.return_attention = return_attention\n\n def build(self, input_shape):\n \"\"\"Build the AttentionLSTM object.\"\"\"\n self.cell.timesteps = input_shape[1]\n self.cell.build(input_shape)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n \"\"\"Call the AttentionLSTM object.\"\"\"\n self.cell._generate_dropout_mask(inputs, training=training)\n self.cell._generate_recurrent_dropout_mask(inputs, training=training)\n return super().call(\n inputs, mask=mask, training=training, initial_state=initial_state\n )\n\n @property\n def units(self):\n \"\"\"Return property units.\"\"\"\n return self.cell.units\n\n @property\n def activation(self):\n \"\"\"Return property activation.\"\"\"\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n \"\"\"Return property recurrent_activation.\"\"\"\n return self.cell.recurrent_activation\n\n @property\n def attention_activation(self):\n \"\"\"Return property attention_activation.\"\"\"\n return self.cell.attention_activation\n\n @property\n def use_bias(self):\n \"\"\"Return property use_bias.\"\"\"\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n \"\"\"Return property kernel_initializer.\"\"\"\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n \"\"\"Return property recurrent_initializer.\"\"\"\n return self.cell.recurrent_initializer\n\n @property\n def attention_initializer(self):\n \"\"\"Return property attention_initializer.\"\"\"\n return self.cell.attention_initializer\n\n @property\n def bias_initializer(self):\n \"\"\"Return property bias_initializer.\"\"\"\n return self.cell.bias_initializer\n\n @property\n def unit_forget_bias(self):\n \"\"\"Return property unit_forget_bias.\"\"\"\n return self.cell.unit_forget_bias\n\n @property\n def kernel_regularizer(self):\n \"\"\"Return property kernel_regularizer.\"\"\"\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n \"\"\"Return property recurrent_regularizer.\"\"\"\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n \"\"\"Return property bias_regularizer.\"\"\"\n return self.cell.bias_regularizer\n\n @property\n def activity_regularizer(self):\n \"\"\"Return property 
activity_regularizer.\"\"\"\n return self.cell.activity_regularizer\n\n @property\n def attention_regularizer(self):\n \"\"\"Return property attention_regularizer.\"\"\"\n return self.cell.attention_regularizer\n\n @property\n def kernel_constraint(self):\n \"\"\"Return property kernel_constraint.\"\"\"\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n \"\"\"Return property recurrent_constraint.\"\"\"\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n \"\"\"Return property bias_constraint.\"\"\"\n return self.cell.bias_constraint\n\n @property\n def attention_constraint(self):\n \"\"\"Return property attention_constraint.\"\"\"\n return self.cell.attention_constraint\n\n @property\n def dropout(self):\n \"\"\"Return property dropout.\"\"\"\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n \"\"\"Return property recurrent_dropout.\"\"\"\n return self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n \"\"\"Return property implementation.\"\"\"\n return self.cell.implementation\n\n def get_config(self):\n \"\"\"Return configuration dict of the AttentionLSTM object.\"\"\"\n config = {\n \"units\": self.units,\n \"activation\": activations.serialize(self.activation),\n \"recurrent_activation\": activations.serialize(\n self.recurrent_activation\n ),\n \"attention_activation\": activations.serialize(\n self.attention_activation\n ),\n \"use_bias\": self.use_bias,\n \"kernel_initializer\": initializers.serialize(self.kernel_initializer),\n \"recurrent_initializer\": initializers.serialize(\n self.recurrent_initializer\n ),\n \"bias_initializer\": initializers.serialize(self.bias_initializer),\n \"attention_initializer\": initializers.serialize(\n self.attention_initializer\n ),\n \"use_chrono_initialization\": self.unit_forget_bias,\n \"kernel_regularizer\": regularizers.serialize(self.kernel_regularizer),\n \"recurrent_regularizer\": regularizers.serialize(\n self.recurrent_regularizer\n ),\n \"bias_regularizer\": regularizers.serialize(self.bias_regularizer),\n \"activity_regularizer\": regularizers.serialize(\n self.activity_regularizer\n ),\n \"attention_regularizer\": regularizers.serialize(\n self.attention_regularizer\n ),\n \"kernel_constraint\": constraints.serialize(self.kernel_constraint),\n \"recurrent_constraint\": constraints.serialize(\n self.recurrent_constraint\n ),\n \"bias_constraint\": constraints.serialize(self.bias_constraint),\n \"attention_constraint\": constraints.serialize(\n self.attention_constraint\n ),\n \"dropout\": self.dropout,\n \"recurrent_dropout\": self.recurrent_dropout,\n \"return_attention\": self.return_attention,\n }\n base_config = super().get_config()\n del base_config[\"cell\"]\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Create a new AttentionLSTM object from a configuration dict.\"\"\"\n if \"implementation\" in config and config[\"implementation\"] == 0:\n config[\"implementation\"] = 1\n return cls(**config)\n\n return AttentionLSTM", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, 
lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def generic_rnn__ncnn(g,\n variant,\n input,\n initial_states,\n all_weights,\n has_biases,\n num_layers,\n dropout,\n train,\n bidirectional,\n batch_first=None,\n batch_sizes=None):\n warnings.warn(\n 'Exporting a model to ONNX with a batch_size other than 1, ' +\n 'with a variable length with ' + variant + ' can cause an error ' +\n 'when running the ONNX model with a different batch size. ' +\n 'Make sure to save the model with a batch size of 1, ' +\n 'or define the initial states (h0/c0) as inputs of the model. 
')\n\n onnxActivations = [\n 'Relu', 'Tanh', 'Sigmoid', 'Affine', 'LeakyRelu', 'ThresholdedRelu',\n 'ScaledTanh', 'HardSigmoid', 'Elu', 'Softsign', 'Softplus'\n ]\n variantToOnnxActivationMap = dict(\n zip([act_fun.lower() for act_fun in onnxActivations], onnxActivations))\n weights_per_layer = 4 if has_biases else 2\n # this means that projections are used inside LSTM,\n # so need to tell user that it's not supported\n if variant == 'LSTM' and len(\n all_weights) != num_layers * weights_per_layer * (1 +\n bidirectional):\n return _unimplemented('LSTM', 'LSTMs with projections')\n assert len(all_weights) == num_layers * weights_per_layer * (1 +\n bidirectional)\n layer_weights = [\n all_weights[i:i + weights_per_layer]\n for i in range(0, len(all_weights), weights_per_layer)\n ]\n if batch_first:\n # batch, seq, feat -> seq, batch, feat\n input = g.op('Transpose', input, perm_i=[1, 0, 2])\n if dropout and train:\n return _unimplemented('RNN/GRU/LSTM', 'dropout in training mode')\n\n if variant.startswith('RNN'):\n nonlinearity = variantToOnnxActivationMap[variant[4:].lower()]\n variant = 'RNN'\n\n w_hh = all_weights[1]\n hidden_size = sym_help._get_tensor_dim_size(w_hh, 1)\n if hidden_size is None:\n return _unimplemented('RNN/GRU/LSTM', 'unknown hidden size')\n\n unidirectional = not bidirectional\n\n prev_output = input\n\n h_outs = []\n if variant == 'RNN' or variant == 'GRU':\n h0 = initial_states\n elif variant == 'LSTM':\n h0, c0 = initial_states\n c_outs = []\n\n sequence_lens = unused(g) if batch_sizes is None else batch_sizes\n\n if variant == 'GRU':\n # pytorch is reset, input, hidden\n # onnx is input, reset, hidden\n reform_permutation = [(1, 2), (0, 1), (2, 3)]\n elif variant == 'LSTM':\n # pytorch is input, forget, cell, output.\n # onnx is input, output, forget, cell.\n reform_permutation = [(0, 1), (3, 4), (1, 3)]\n\n def reform_weights(g, w, n, intervals):\n slices = [\n sym_help._slice_helper(\n g, w, axes=[0], starts=[x * n], ends=[y * n])\n for x, y in intervals\n ]\n return g.op('Concat', *slices, axis_i=0)\n\n def transform_weights_no_bias(layer_index):\n weights = layer_weights[layer_index]\n if variant == 'RNN':\n weight_ih, weight_hh = weights\n elif variant == 'GRU' or variant == 'LSTM':\n weight_ih, weight_hh = [\n reform_weights(g, w, hidden_size, reform_permutation)\n for w in weights\n ]\n return tuple(\n sym_help._unsqueeze_helper(g, x, [0])\n for x in (weight_ih, weight_hh))\n\n def transform_weights(layer_index):\n weights = layer_weights[layer_index]\n if variant == 'RNN':\n weight_ih, weight_hh, bias_ih, bias_hh = weights\n elif variant == 'GRU' or variant == 'LSTM':\n weight_ih, weight_hh, bias_ih, bias_hh = [\n reform_weights(g, w, hidden_size, reform_permutation)\n for w in weights\n ]\n bias_concat = g.op('Concat', bias_ih, bias_hh, axis_i=0)\n return tuple(\n sym_help._unsqueeze_helper(g, x, [0])\n for x in (weight_ih, weight_hh, bias_concat))\n\n def retrieve_state(x, start, end):\n return x if num_layers == 1 else sym_help._slice_helper(\n g, x, axes=[0], starts=[start], ends=[end])\n\n for i in range(num_layers):\n if unidirectional:\n if weights_per_layer == 4:\n weight_ih, weight_hh, bias_concat = transform_weights(i)\n else:\n weight_ih, weight_hh = transform_weights_no_bias(i)\n bias_concat = unused(g)\n\n state_indices = i, i + 1\n else:\n if weights_per_layer == 4:\n weight_ih_f, weight_hh_f, bias_f = transform_weights(2 * i)\n weight_ih_b, weight_hh_b, bias_b = transform_weights(2 * i + 1)\n bias_concat = g.op('Concat', bias_f, bias_b, 
axis_i=0)\n else:\n weight_ih_f, weight_hh_f = transform_weights_no_bias(2 * i)\n weight_ih_b, weight_hh_b = transform_weights_no_bias(2 * i + 1)\n bias_concat = unused(g)\n\n weight_ih = g.op('Concat', weight_ih_f, weight_ih_b, axis_i=0)\n weight_hh = g.op('Concat', weight_hh_f, weight_hh_b, axis_i=0)\n\n state_indices = 2 * i, 2 * i + 2\n\n inputs = [\n prev_output, weight_ih, weight_hh, bias_concat, sequence_lens\n ]\n\n inputs.append(retrieve_state(h0, *state_indices))\n if variant == 'LSTM':\n inputs.append(retrieve_state(c0, *state_indices))\n\n extra_kwargs = {} if unidirectional else {\n 'direction_s': 'bidirectional'\n }\n if variant == 'RNN':\n if bidirectional:\n activation = [nonlinearity, nonlinearity]\n else:\n activation = [nonlinearity]\n\n prev_output, h_out = g.op(\n 'RNN',\n *inputs,\n outputs=2,\n hidden_size_i=hidden_size,\n activations_s=activation,\n **extra_kwargs)\n elif variant == 'GRU':\n prev_output, h_out = g.op(\n 'GRU',\n *inputs,\n outputs=2,\n hidden_size_i=hidden_size,\n linear_before_reset_i=1,\n **extra_kwargs)\n elif variant == 'LSTM':\n # g.op will add some node to h0 and c0,\n # which is not necessary for us\n prev_output, h_out, c_out = g.op(\n 'ncnn::LSTM',\n *inputs,\n outputs=3,\n hidden_size_i=hidden_size,\n **extra_kwargs)\n if bidirectional:\n # The ONNX RNN/GRU/LSTM produce an output of dimensions\n # seq_len, num_directions, batch, hidden_size\n # We have to convert to match pytorch's expected\n # seq_len, batch, num_directions * hidden_size\n # by first moving num_directions before hidden_size with\n # Transpose, and then combining it with hidden_size\n # with Reshape.\n prev_output = g.op('Transpose', prev_output, perm_i=[0, 2, 1, 3])\n prev_output = g.op(\n 'Reshape', prev_output,\n g.op('Constant', value_t=torch.LongTensor([0, 0, -1])))\n else:\n prev_output = sym_help._squeeze_helper(g, prev_output, [1])\n\n h_outs.append(h_out)\n if variant == 'LSTM':\n c_outs.append(c_out)\n if batch_first:\n # seq, batch, num_directions * hidden_size -> batch, seq,\n # num_directions * hidden_size\n prev_output = g.op('Transpose', prev_output, perm_i=[1, 0, 2])\n h_outs = h_out if num_layers == 1 else g.op('Concat', *h_outs, axis_i=0)\n if variant == 'RNN' or variant == 'GRU':\n return prev_output, h_outs\n elif variant == 'LSTM':\n c_outs = c_out if num_layers == 1 else g.op(\n 'Concat', *c_outs, axis_i=0)\n return prev_output, h_outs, c_outs", "def forward(self, frames):\n # global low_level_representation, semantic_representation\n batch_size, time_steps, height, width, channels = frames.get_shape()\n\n with tf.name_scope(name=self.name):\n if self.encoder_mode == 'ConvLSTM':\n conv_lstm_cell = cell.ConvLSTMCell(shape=[height, width], filters=channels, kernel=[3, 3], normalize=True)\n fw_output, fw_state = rnn_inference(frames, conv_lstm_cell, 'forward_bilstm', False, 'conv')\n bw_output, bw_state = rnn_inference(frames, conv_lstm_cell, 'backward_bilstm', True, 'conv')\n # aggregate the output\n encoder_output = fw_output + bw_output # (128, 32, 224, 224, 6)\n\n # construct encoder state, but check whether it is valid to directly add the cell and hidden\n encoder_cell = fw_state[0] + bw_state[0]\n encoder_hidden = fw_state[1] + bw_state[1]\n encoder_state = tf.nn.rnn_cell.LSTMStateTuple(encoder_cell, encoder_hidden)\n print(encoder_state)\n\n # use 3d convolutional block before the bidirectional lstm\n elif self.encoder_mode == 'conv2lstm':\n conv_out = cnn_inference(frames, name='conv') # [batch, time, 512]\n lstm_cell = 
tf.nn.rnn_cell.BasicLSTMCell(self.hidden_units)\n fw_output, fw_state = rnn_inference(conv_out, lstm_cell, 'forward_bilstm', False, 'linear')\n bw_output, bw_state = rnn_inference(conv_out, lstm_cell, 'backward_bilstm', True, 'linear')\n\n # aggregate the output\n encoder_output = fw_output + bw_output # (128, 32, 1200)\n\n # construct encoder state, but check whether it is valid to directly add the cell and hidden\n encoder_cell = fw_state[0] + bw_state[0]\n encoder_hidden = fw_state[1] + bw_state[1]\n encoder_state = tf.nn.rnn_cell.LSTMStateTuple(encoder_cell, encoder_hidden)\n\n elif self.encoder_mode == 'C3D':\n pass\n\n return encoder_output, encoder_state", "def test_torch_train_original_layer_multiple(self):\n model = Sequential(\n self.get_digital_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n analog_model = Sequential(\n self.get_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n self.set_weights_from_digital_model(analog_layer, layer)\n\n loss_func = mse_loss\n y_b = randn(3, 3, 6, 6, 6)\n x_b = randn(3, 2, 4, 4, 4)\n\n if self.use_cuda:\n y_b = y_b.cuda()\n x_b = x_b.cuda()\n\n self.train_model(model, loss_func, x_b, y_b)\n self.train_model(analog_model, loss_func, x_b, y_b)\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n weight, bias = self.get_weights_from_digital_model(analog_layer, layer)\n\n weight_analog, bias_analog = analog_layer.analog_tile.get_weights(realistic=False)\n\n self.assertTensorAlmostEqual(weight_analog, weight)\n if analog_layer.use_bias:\n self.assertTensorAlmostEqual(bias_analog, bias)", "def test_torch_train_original_layer_multiple(self):\n model = Sequential(\n self.get_digital_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n analog_model = Sequential(\n self.get_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n self.set_weights_from_digital_model(analog_layer, layer)\n\n loss_func = mse_loss\n y_b = randn(3, 3, 6, 6)\n x_b = randn(3, 2, 4, 4)\n\n if self.use_cuda:\n y_b = y_b.cuda()\n x_b = x_b.cuda()\n\n self.train_model(model, loss_func, x_b, y_b)\n self.train_model(analog_model, loss_func, x_b, y_b)\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n weight, bias = self.get_weights_from_digital_model(analog_layer, layer)\n\n weight_analog, bias_analog = analog_layer.analog_tile.get_weights(realistic=False)\n\n self.assertTensorAlmostEqual(weight_analog, weight)\n if analog_layer.use_bias:\n self.assertTensorAlmostEqual(bias_analog, bias)", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: 
Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test\".format(\n workspace=workspace_dir\n )" ]
[ "0.7174004", "0.6981176", "0.6942284", "0.66725725", "0.65949506", "0.6501579", "0.6480307", "0.6405576", "0.6309258", "0.6258608", "0.62536645", "0.6212986", "0.6177987", "0.61688703", "0.6160826", "0.6120184", "0.6105675", "0.6078574", "0.6066392", "0.60473734", "0.60453326", "0.6045287", "0.6001264", "0.60004294", "0.5976831", "0.59621286", "0.59295", "0.5912783", "0.59098107", "0.5904117", "0.58859783", "0.58815885", "0.58745676", "0.5858895", "0.5852876", "0.5822271", "0.5814758", "0.5801058", "0.58000207", "0.57825226", "0.57450783", "0.574123", "0.57251984", "0.5712929", "0.57126445", "0.57126325", "0.57038015", "0.5700401", "0.5665262", "0.5663031", "0.56629574", "0.56626475", "0.5662619", "0.5662357", "0.5661423", "0.56542224", "0.56528074", "0.5625986", "0.5617321", "0.5607482", "0.56003153", "0.55949235", "0.5593223", "0.55869037", "0.55864924", "0.55854636", "0.55854636", "0.5580893", "0.55787235", "0.55779564", "0.5576369", "0.5564488", "0.55621606", "0.55619675", "0.55603665", "0.5559296", "0.5554304", "0.55528975", "0.5548657", "0.55430275", "0.55425996", "0.5539747", "0.5536123", "0.55263585", "0.5522499", "0.55182606", "0.5516852", "0.5514998", "0.55059105", "0.5503718", "0.5503021", "0.55023485", "0.55004954", "0.5492352", "0.54923135", "0.5491358", "0.5483854", "0.5482687", "0.54825634", "0.54814416" ]
0.6794371
3
Test GRU gnmt encoder. time_major=True
def runGRUEncoder(self, encoder, num_layers):
    inputs_ph = tf.placeholder(
        dtype=tf.float32,
        shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH))
    inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None))
    outputs, states = encoder.encode(
        mode=tf.estimator.ModeKeys.TRAIN,
        sequence_inputs=inputs_ph,
        sequence_length=inputs_length_ph)

    num_bi_layers = 1
    num_uni_layers = num_layers - num_bi_layers

    if num_uni_layers == 1:
        states_bi_bw, states_uni = states
        # states_bi_bw = (states_bi_bw,)
        self.assertEqual(1, len(states_bi_bw))
        self.assertEqual(num_uni_layers, len(states_uni))
        # unlike lstm, whose states is a tuple of (c,h),
        # gru states has only one element
        # states_bi_bw[0] is a states tensor
        states_list = [states_bi_bw[0]]
        for i in range(num_uni_layers):
            states_list.append(states_uni[i])
        states = tf.convert_to_tensor(states_list)
    else:
        states_uni = states
        self.assertEqual(num_uni_layers, len(states_uni))
        states_list = []
        for i in range(num_uni_layers):
            states_list.append(states_uni[i])
        states = tf.convert_to_tensor(states_list)

    inputs, inputs_length = common_utils.get_encoder_test_inputs()
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        outputs, states = sess.run(
            [outputs, states],
            feed_dict={
                inputs_ph: inputs,
                inputs_length_ph: inputs_length
            })
        self.assertAllEqual(
            [common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH],
            outputs.shape)
        if num_uni_layers == 1:
            self.assertEqual(num_layers, len(states))
            self.assertAllEqual(
                [num_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],
                states.shape)
        else:
            self.assertEqual(num_uni_layers, len(states))
            self.assertAllEqual(
                [num_uni_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],
                states.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_golay_module1(self):\r\n sent = golay.encode([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0])\r\n rec = sent[:-1] + 'C' # possible error here\r\n decoded, errors = golay.decode(rec)\r\n self.assertEqual(decoded, sent)\r\n self.assertLess(errors, 1.5)\r\n rec = sent[:-1] + 'T' # possible error here\r\n decoded, errors = golay.decode(rec)\r\n self.assertEqual(decoded, sent)\r\n self.assertLess(errors, 1.5)", "def testEncoder(self):\n params = copy.copy(self.typical_instance)\n params.prob_f = 0.5\n params.prob_p = 0.5\n params.prob_q = 0.75\n\n rand_funcs = rappor.SimpleRandFuncs(params, MockRandom())\n rand_funcs.cohort_rand_fn = lambda a, b: a\n e = rappor.Encoder(params, 0, rand_funcs=rand_funcs)\n\n cohort, bloom_bits_irr = e.encode(\"abc\")\n\n self.assertEquals(0, cohort)\n self.assertEquals(0x000ffff, bloom_bits_irr)", "def test_generate_raw(self):\n raw_result = self.raw_test_particle.generate_raw()\n decoded_raw = json.loads(raw_result)\n \n driver_time = decoded_raw[\"driver_timestamp\"]\n self.sample_raw_particle[\"driver_timestamp\"] = driver_time\n \n # run it through json so unicode and everything lines up\n standard = json.dumps(self.sample_raw_particle, sort_keys=True)\n self.assertEqual(raw_result, standard)", "def test_encoder(self):\n from sosbeacon.utils import number_encode\n\n number = 123\n encoded = number_encode(number)\n self.assertEqual(encoded, 'b6')", "def test_standard_tonnetz():\n run_framesync(Tonnetz)", "def test_UniformTime_repr():", "def test_save_tsc_old_version(uvm_nano):\n uvm_nano.start()\n uvm_nano.snapshot_full(target_version=\"0.24.0\")\n uvm_nano.check_log_message(\"Saving to older snapshot version, TSC freq\")", "def create_training_record(data_path: str, path_to_gt: str, ratio: float):\n with open(data_path) as file:\n data = json.load(file)\n\n base = os.path.join(os.path.dirname(data_path), datetime.now().strftime('%Y_%m_%d_%H%M%S'))\n train_filename = '{}_{}'.format(base, 'train.tfrecords')\n test_filename = '{}_{}'.format(base, 'test.tfrecords')\n\n train_writer = tf.python_io.TFRecordWriter(train_filename)\n test_writer = tf.python_io.TFRecordWriter(test_filename)\n\n gt_reader = GroundTruthReader(path_to_gt)\n train_set_len = 1\n test_set_len = 1\n zeros = 0\n ones = 0\n\n for i, key in enumerate(data):\n if not i % 1000:\n print('Data: {}/{}'.format(i, len(data)))\n sys.stdout.flush()\n\n features, labels = compute_feature(key, data[key], gt_reader)\n\n for j, feat in enumerate(features):\n label = labels[j]\n if test_set_len / train_set_len >= ratio:\n # balance out training dataset (there are normally more zero- than one-labels)\n if (label == 0.0 and (zeros - ones <= 0)) or label == 1.0:\n train_set_len += 1\n if label == 1.0:\n ones += 1\n else:\n zeros += 1\n feature = {'train/feature': float_feature(feat),\n 'train/label': float_feature(labels[j])}\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n train_writer.write(example.SerializeToString())\n else:\n test_set_len += 1\n feature = {'test/feature': float_feature(feat),\n 'test/label': float_feature(labels[j])}\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n test_writer.write(example.SerializeToString())\n train_writer.close()\n sys.stdout.flush()", "def test_includinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n assert len(model.get_alphabet()) == 5", "def mkrngs(self):\n self.bkg[[0, -1]] = False\n bkgr = self.Time[self.bkg ^ np.roll(self.bkg, -1)]\n self.bkgrng = np.reshape(bkgr, 
[bkgr.size // 2, 2])\n\n self.sig[[0, -1]] = False\n sigr = self.Time[self.sig ^ np.roll(self.sig, 1)]\n self.sigrng = np.reshape(sigr, [sigr.size // 2, 2])\n\n self.trn[[0, -1]] = False\n trnr = self.Time[self.trn ^ np.roll(self.trn, 1)]\n self.trnrng = np.reshape(trnr, [trnr.size // 2, 2])", "def compile_gru_model(input_dim=101, output_dim=4563, recur_layers=3, nodes=1000,\n conv_context=11, conv_border_mode='valid', conv_stride=2,\n initialization='glorot_uniform', batch_norm=True, num_gpu=1):\n logger.info(\"Building gru model\")\n # Main acoustic input\n acoustic_input = Input(shape=(None, input_dim), name='acoustic_input')\n\n # Setup the network\n #conv_1d = Conv1D(nodes, conv_context, name='conv_1d',\n # padding='same', strides=conv_stride,\n # kernel_initializer=initialization,\n # activation='relu')(acoustic_input)\n conv_1d = Convolution1D(nodes, conv_context, name='conv1d',\n border_mode=conv_border_mode,\n subsample_length=conv_stride, init=initialization,\n activation='relu')(acoustic_input)\n if batch_norm:\n output = normalization.BatchNormalization(name='bn_conv_1d')(conv_1d, training=True)\n else:\n output = conv_1d\n\n for r in range(recur_layers):\n # output = GRU(nodes, activation='relu',\n # name='rnn_{}'.format(r + 1), init=initialization,\n # return_sequences=True)(output)\n output = Bidirectional(GRU(nodes, return_sequences=True),name='bi_lstm_{}'.format(r + 1))(output)\n if batch_norm:\n bn_layer = normalization.BatchNormalization(name='bn_rnn_{}'.format(r + 1),moving_mean_initializer='zeros')\n output = bn_layer(output, training=True)\n\n network_output = TimeDistributed(Dense(\n output_dim+1, name='dense', activation='softmax', init=initialization,\n ))(output)\n model = Model(input=acoustic_input, output=network_output)\n #model.conv_output_length = lambda x: conv_output_length(\n # x, conv_context, conv_border_mode, conv_stride)\n # model = ParallelModel(model, num_gpu)\n return model", "async def test_floating_point_encoding(self, r):\n await r.flushdb()\n timestamp = 1349673917.939762\n await r.zadd('a', timestamp, 'a1')\n assert await r.zscore('a', 'a1') == timestamp", "def test_transcoder(self, raw, value):\n assert DPTSceneNumber.to_knx(value) == DPTArray(raw)\n assert DPTSceneNumber.from_knx(DPTArray(raw)) == value", "def test_socialledge_encode_decode_mux_0(self):\n\n db = cantools.db.File()\n filename = os.path.join('tests', 'files', 'socialledge.dbc')\n db.add_dbc_file(filename)\n\n frame_id = 200\n data = {\n 'SENSOR_SONARS_mux': 0,\n 'SENSOR_SONARS_err_count': 1,\n 'SENSOR_SONARS_left': 2,\n 'SENSOR_SONARS_middle': 3,\n 'SENSOR_SONARS_right': 4,\n 'SENSOR_SONARS_rear': 5\n }\n\n encoded = db.encode_message(frame_id, data)\n self.assertEqual(encoded, b'\\x10\\x00\\x14\\xe0\\x01( \\x03')\n\n decoded = db.decode_message(frame_id, encoded)\n self.assertEqual(decoded, data)", "def test_gendaymtx():\n # sky.gendaymtx(\n # sun_mtx, 6, data=wea_data, meta=wea_metadata, direct=True, onesun=True\n # )\n pass", "def ug(micrograms):\n return Unit(micrograms,\"microgram\")", "def testInitialize(self):\n golang_epoch = golang_time.GolangTimeEpoch()\n self.assertIsNotNone(golang_epoch)", "def swiss_to_gts(v):\n return v - np.array([667400, 158800, 1700])", "def test_excludeinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=False)\n assert len(model.get_alphabet()) == 4", "def RLenc(img,order='F',format=True):\n bytes = img.reshape(img.shape[0] * img.shape[1], order=order)\n runs = [] ## list of run lengths\n\n # RLeC < 30 Drop\n gt0 = 
np.where(bytes > 0)[0]\n #print(\"RlenC=%d\",len(gt0))\n #if len(gt0) < 3600 or len(gt0) > 16000: # 70x70 consider as empty\n if len(gt0) < 3600: # 70x70 consider as empty\n return '' \n\n r = 0 ## the current run length\n pos = 1 ## count starts from 1 per WK\n for c in bytes:\n if ( c == 0 ):\n if r != 0:\n runs.append((pos, r))\n pos+=r\n r=0\n pos+=1\n else:\n r+=1\n\n #if last run is unsaved (i.e. data ends with 1)\n if r != 0:\n runs.append((pos, r))\n pos += r\n r = 0\n\n if format:\n z = ''\n \n for rr in runs:\n #if rr[1] > 1: # drop single point \n z+='{} {} '.format(rr[0],rr[1])\n return z[:-1]\n else:\n return runs", "def test_encoder(device='/gpu:0'):\n tf.reset_default_graph()\n B, H, W, C = 64, 256, 256, 1\n latent_dim = 16\n with tf.device(device):\n gray_imgs = tf.zeros((B, H, W, C))\n latent_samples, latent_mean, latent_sd = encoder(gray_imgs, latent_dim)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n latent_samples_np, latent_mean_np, latent_sd_np = sess.run([latent_samples, latent_mean, latent_sd])\n print('Output shape should be (%d, %d)' % (B, latent_dim))\n print('latent_samples shape: ' + str(latent_samples_np.shape))\n print('latent_mean shape: ' + str(latent_mean_np.shape))\n print('latent_sd shape: ' + str(latent_sd_np.shape))", "def gnss_ins_sim_recorder():\n # ensure gnss_ins_sim_node is unique:\n rospy.init_node('gnss_ins_sim_recorder_node')\n\n # parse params:\n motion_def_name = rospy.get_param('/gnss_ins_sim_recorder_node/motion_file')\n sample_freq_imu = rospy.get_param('/gnss_ins_sim_recorder_node/sample_frequency/imu')\n sample_freq_gps = rospy.get_param('/gnss_ins_sim_recorder_node/sample_frequency/gps')\n topic_name_imu = rospy.get_param('/gnss_ins_sim_recorder_node/imu_topic_name')\n topic_name_odom = rospy.get_param('/gnss_ins_sim_recorder_node/odom_gt_topic_name')\n rosbag_output_path = rospy.get_param('/gnss_ins_sim_recorder_node/output_path')\n rosbag_output_name = rospy.get_param('/gnss_ins_sim_recorder_node/output_name')\n gt_output_path = rospy.get_param(\"gnss_ins_sim_recorder_node/groundtruth_output_path\")\n gt_output_name = rospy.get_param(\"gnss_ins_sim_recorder_node/groundtruth_output_name\")\n\n # generate simulated data:\n motion_def_path = os.path.join(\n rospkg.RosPack().get_path('gnss_ins_sim'), 'config', 'motion_def', motion_def_name\n )\n imu_simulator = get_gnss_ins_sim(\n # motion def file:\n motion_def_path,\n # gyro-accel/gyro-accel-mag sample rate:\n sample_freq_imu,\n # GPS sample rate:\n sample_freq_gps\n )\n # gt_output_file = open(os.path.join(gt_output_path, gt_output_name), 'w')\n with rosbag.Bag(\n os.path.join(rosbag_output_path, rosbag_output_name), 'w'\n ) as bag:\n # get timestamp base:\n timestamp_start = rospy.Time.now()\n origin_pos = [0.0, 0.0, 0.0]\n for i, measurement in enumerate(imu_simulator):\n if i == 0:\n origin_pos = [measurement['data']['ref_pos_x'],\n measurement['data']['ref_pos_y'],\n measurement['data']['ref_pos_z']]\n # init:\n msg_imu = Imu()\n timestamp = timestamp_start + rospy.Duration.from_sec(measurement['stamp'])\n # a. set header:\n msg_imu.header.frame_id = 'NED'\n msg_imu.header.stamp = timestamp\n # b. set orientation estimation:\n msg_imu.orientation.x = 0.0\n msg_imu.orientation.y = 0.0\n msg_imu.orientation.z = 0.0\n msg_imu.orientation.w = 1.0\n # c. 
gyro:\n msg_imu.angular_velocity.x = measurement['data']['gyro_x']\n msg_imu.angular_velocity.y = measurement['data']['gyro_y']\n msg_imu.angular_velocity.z = measurement['data']['gyro_z']\n msg_imu.linear_acceleration.x = measurement['data']['accel_x']\n msg_imu.linear_acceleration.y = measurement['data']['accel_y']\n msg_imu.linear_acceleration.z = measurement['data']['accel_z']\n\n msg_odom = Odometry()\n msg_odom.header.stamp = timestamp\n msg_odom.header.frame_id = 'inertial'\n\n # b.set child frame id:\n msg_odom.child_frame_id = 'inertial'\n\n # b.set orientation:\n msg_odom.pose.pose.orientation.x = measurement['data']['ref_att_quat_q1']\n msg_odom.pose.pose.orientation.y = measurement['data']['ref_att_quat_q2']\n msg_odom.pose.pose.orientation.z = measurement['data']['ref_att_quat_q3']\n msg_odom.pose.pose.orientation.w = measurement['data']['ref_att_quat_q0']\n\n # c.set position:\n msg_odom.pose.pose.position.x = measurement['data']['ref_pos_x'] - origin_pos[0]\n msg_odom.pose.pose.position.y = measurement['data']['ref_pos_y'] - origin_pos[1]\n msg_odom.pose.pose.position.z = measurement['data']['ref_pos_z'] - origin_pos[2]\n\n # d.set velocity:\n msg_odom.twist.twist.linear.x = measurement['data']['ref_vel_x']\n msg_odom.twist.twist.linear.y = measurement['data']['ref_vel_y']\n msg_odom.twist.twist.linear.z = measurement['data']['ref_vel_z']\n # write:\n bag.write(topic_name_imu, msg_imu, msg_imu.header.stamp)\n bag.write(topic_name_odom, msg_odom, msg_odom.header.stamp)\n\n # gt_output_file.write(\"{0} {1} {2} {3} {4} {5} {6} {7}\\n\"\n # .format(timestamp,\n # msg_odom.pose.pose.position.x,\n # msg_odom.pose.pose.position.y,\n # msg_odom.pose.pose.position.z,\n # msg_odom.pose.pose.orientation.x,\n # msg_odom.pose.pose.orientation.y,\n # msg_odom.pose.pose.orientation.z,\n # msg_odom.pose.pose.orientation.w))\n # gt_output_file.close()", "def time_encode(self):\n for ii in range(100):\n for fragment in self.msg.encode_msg(1, 16382):\n pass", "def rc4_prga(r, t: int):\n w = 256\n i = j = 0\n s = BitArray()\n\n print(\"CHANGE THE STREAM LENGTH HERE !!!\")\n t = t // 8\n\n for l in range(t):\n i = (i + 1) % w\n j = (j + r[i]) % w\n r[i], r[j] = r[j], r[i]\n\n k = r[(r[i] + r[j]) % w]\n s += Bits(bytearray(k.to_bytes(1, byteorder='big')))\n\n debug(True, fun_name + \" : stream = \" + str(s))\n return s", "def define_nmt(hidden_size, batch_size, en_timesteps, en_vsize, fr_timesteps, fr_vsize):\n\n # Define an input sequence and process it.\n if batch_size:\n encoder_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inputs')\n decoder_inputs = Input(batch_shape=(batch_size, fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n encoder_inputs = Input(shape=(en_timesteps, en_vsize), name='encoder_inputs')\n if fr_timesteps:\n decoder_inputs = Input(shape=(fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n decoder_inputs = Input(shape=(None, fr_vsize), name='decoder_inputs')\n\n # Encoder GRU\n encoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru')\n encoder_out, encoder_state = encoder_gru(encoder_inputs)\n\n # Set up the decoder GRU, using `encoder_states` as initial state.\n decoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='decoder_gru')\n decoder_out, decoder_state = decoder_gru(decoder_inputs, initial_state=encoder_state)\n\n # Attention layer\n attn_layer = AttentionLayer(name='attention_layer')\n attn_out, attn_states = attn_layer([encoder_out, decoder_out])\n\n # 
Concat attention input and decoder GRU output\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n\n # Dense layer\n dense = Dense(fr_vsize, activation='softmax', name='softmax_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Full model\n full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)\n full_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics = ['accuracy'])\n\n full_model.summary()\n\n \"\"\" Inference model \"\"\"\n batch_size = 1\n\n \"\"\" Encoder (Inference) model \"\"\"\n encoder_inf_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inf_inputs')\n encoder_inf_out, encoder_inf_state = encoder_gru(encoder_inf_inputs)\n encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n \"\"\" Decoder (Inference) model \"\"\"\n decoder_inf_inputs = Input(batch_shape=(batch_size, 1, fr_vsize), name='decoder_word_inputs')\n encoder_inf_states = Input(batch_shape=(batch_size, en_timesteps, hidden_size), name='encoder_inf_states')\n decoder_init_state = Input(batch_shape=(batch_size, hidden_size), name='decoder_init')\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = Model(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return full_model, encoder_model, decoder_model", "def test_pretrained():\n\n model = get_model()\n nums = generate_numbers()\n b = get_B(base=10, precision=[7, -9], size=1)[0]\n X = np.zeros((len(nums), 2 * len(b)))\n Y = np.zeros((len(nums), 1))\n\n for i, num in enumerate(nums):\n X[i] = encode(num, b)\n Y[i][0] = num\n\n loss = model.evaluate(x=X, y=Y)\n\n assert loss < 1e-5", "def test_tte5_short_write_tile_signature(self):\n filename = str(self.temp_j2k_filename)\n xtx5_setup(filename, short_sig=True)\n self.assertTrue(True)", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(num_gpus=1)\n self._run_benchmark(params)", "def test_enc_FOR_MOTHER_RUSSIA(self):\n # test machine\n e1 = core.Machine(plugboard=['ZU', 'HL', 'CQ', 'WM', 'OA', 'PY', 'EB', 'TR', 'DN', 'VI'], settings=['X','I','S'],rotors=[erotors.M3_IV,erotors.M3_V, erotors.M3_II], offsets=['N','O','C'], reflector=reflectors.B)\n\n # set state\n e1.encrypt('MLD')\n \n # assert encryption output\n self.assertEqual(e1._buffer.decode(), 'DOR')\n\n\n e1 = core.Machine(plugboard=['ZU', 'HL', 'CQ', 'WM', 'OA', 'PY', 'EB', 'TR', 'DN', 'VI'], settings=['X','I','S'],rotors=[erotors.M3_IV,erotors.M3_V, erotors.M3_II], offsets=['R','O','D'], reflector=reflectors.B)\n\n\n # set state\n e1.encrypt('UMDPQ CUAQN LVVSP IARKC TTRJQ KCFPT OKRGO ZXALD RLPUH AUZSO SZFSU GWFNF DZCUG VEXUU LQYXO TCYRP SYGGZ HQMAG PZDKC KGOJM MYYDD H')\n\n print(e1._buffer.decode())\n\n self.assertEqual(e1._buffer.decode(), \"GROUP SOUTH COMMA NDFRO MGENP AULUS XSIXT HARMY ISENC IRCLE DXOPE RATIO NBLAU FAILE DXCOM MENCE RELIE FOPER ATION IMMED IATEL Y\")", "def test_gregorian_mismatch(self):\n date = datetime(2017, 1, 1)\n prices = [2] * 7\n expected_sequence = 27\n expected_week = 
52\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )", "def test_GBL_tau_star():\n z = 1.0\n\n # Fully ionized H and He\n x_ionH = 1.0\n x_ionHe = 2.0\n\n cosmo = {}\n cosmo['omega_M_0'] = numpy.array([[0.3],[0.6],[1.0]])\n cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']\n cosmo['h'] = 0.65\n cosmo['omega_b_0'] = 0.02 / cosmo['h']**2.\n cosmo['Y_He'] = 0.24\n cd.set_omega_k_0(cosmo)\n\n tau_inst, tau_star = cr.optical_depth_instant(z, \n x_ionH=x_ionH, \n x_ionHe=x_ionHe, \n return_tau_star=True,\n **cosmo)\n print(\"tau_star = %.7f\" % (tau_star))\n print(\"tau_star/(h Omega_b) = %.7f =? 0.061\" % \n (tau_star / (cosmo['h'] * cosmo['omega_b_0'])))\n\n ntest.assert_approx_equal(tau_star / (cosmo['h'] * cosmo['omega_b_0']),\n 0.061,\n 2)\n\n print(\"(1 - Y_He/2) = %.3f =? 0.88\" % (1. - (cosmo['Y_He']/2.)))\n ntest.assert_approx_equal((1. - (cosmo['Y_He']/2.)),\n 0.88,\n 7)\n\n H_0 = cc.H100_s * cosmo['h']\n\n # s^-1 * Mpc s^-1 * Mpc^2 / Mpc^3 msun^-1 s^-2 / Msun -> \n tau_star_explicit = ((1. - (cosmo['Y_He']/2.)) * \n ((3. * H_0 * cosmo['omega_b_0'] * cc.c_light_Mpc_s *\n cc.sigma_T_Mpc) / \n (8. * math.pi * cc.G_const_Mpc_Msun_s * \n (cc.m_p_g/cc.M_sun_g))))\n\n print(\"tau_star_explicit = %.7f =? tau_star\" % (tau_star_explicit))\n ntest.assert_approx_equal(tau_star, tau_star_explicit, 3)", "def test_dx10_bc7_unorm_srgb():\n\n with Image.open(TEST_FILE_DX10_BC7_UNORM_SRGB) as im:\n im.load()\n\n assert im.format == \"DDS\"\n assert im.mode == \"RGBA\"\n assert im.size == (16, 16)\n assert im.info[\"gamma\"] == 1 / 2.2\n\n assert_image_equal_tofile(\n im, TEST_FILE_DX10_BC7_UNORM_SRGB.replace(\".dds\", \".png\")\n )", "def test_socialledge_encode_decode_mux_1(self):\n\n db = cantools.db.File()\n filename = os.path.join('tests', 'files', 'socialledge.dbc')\n db.add_dbc_file(filename)\n\n frame_id = 200\n data = {\n 'SENSOR_SONARS_mux': 1,\n 'SENSOR_SONARS_err_count': 2,\n 'SENSOR_SONARS_no_filt_left': 3,\n 'SENSOR_SONARS_no_filt_middle': 4,\n 'SENSOR_SONARS_no_filt_right': 5,\n 'SENSOR_SONARS_no_filt_rear': 6\n }\n\n encoded = db.encode_message(frame_id, data)\n self.assertEqual(encoded, b'!\\x00\\x1e\\x80\\x022\\xc0\\x03')\n\n decoded = db.decode_message(frame_id, encoded)\n self.assertEqual(decoded, data)", "def main():\n timestamps = [1496163646, 1496163676, 1496163706, 1496163735, 1496163765]\n print(f\"Timestamps: {timestamps}\")\n ts = timestamp_encode(timestamps)\n print(f\"Timestamp Encoding: {ts}\")\n d = delta_encode(timestamps)\n print(f\"Delta Encoding: {d}\")\n dod = delta_of_delta_encode(timestamps)\n print(f\"Delta of Delta Encoding: {dod}\")", "def test_new_gsm():\n f = Level3File(get_test_data('nids/KDDC-gsm.nids'))\n\n assert f.gsm_additional.vcp_supplemental == ['AVSET', 'SAILS', 'RxR Noise', 'CBT']\n assert f.gsm_additional.supplemental_cut_count == 2\n truth = [False] * 16\n truth[2] = True\n truth[5] = True\n assert f.gsm_additional.supplemental_cut_map == truth\n assert f.gsm_additional.supplemental_cut_map2 == [False] * 9\n\n # Check that str() doesn't error out\n assert str(f)", "def test_sample_rate(self):\n test_sample_rate = 48000\n self.encoder._sample_rate = test_sample_rate\n self.assertEqual(self.encoder._sample_rate, test_sample_rate)", "def test_coded_freq(self):\n self.assertAlmostEqual(self.g.coded_freq(), 10 / 18)", "def test_2d_time_tran():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test_tp.ft2\")\n assert data.shape == (4096, 2048)\n assert data.dtype == 
'float32'\n assert round(data[0,1],2) == -1525.10\n assert round(data[10,22],2) == 1731.94\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[253.90, -143.80])\n check_ppm_limits(dic,data,1,[174.84, 65.21])", "def test_gen_raw_sfreq():\n raw = _generate_raw(sfreq=100.)\n\n assert_equal(raw.info['sfreq'], 100.)\n\n raw = _generate_raw(sfreq=256.)\n\n assert_equal(raw.info['sfreq'], 256.)", "def build_test_time_data_augmentation(x):\n x_rot_90 = tf.contrib.image.rotate(x, 90)\n x_rot_180 = tf.contrib.image.rotate(x, 180)\n x_rot_270 = tf.contrib.image.rotate(x, 270)\n\n x_flip = tf.reverse(x, [2])\n x_flip_rot_90 = tf.contrib.image.rotate(x_flip, 90)\n x_flip_rot_180 = tf.contrib.image.rotate(x_flip, 180)\n x_flip_rot_270 = tf.contrib.image.rotate(x_flip, 270)\n\n x = tf.concat(\n [\n x, x_rot_90, x_rot_180, x_rot_270, x_flip, x_flip_rot_90,\n x_flip_rot_180, x_flip_rot_270\n ],\n axis=0)\n\n return x", "def test_export_raw_pybv(tmp_path, meas_date, orig_time, ext):\n pytest.importorskip(\"pybv\")\n raw = read_raw_fif(fname_raw, preload=True)\n raw.apply_proj()\n\n raw.set_meas_date(meas_date)\n\n # add some annotations\n annots = Annotations(\n onset=[3, 6, 9, 12, 14], # seconds\n duration=[1, 1, 0.5, 0.25, 9], # seconds\n description=[\n \"Stimulus/S 1\",\n \"Stimulus/S2.50\",\n \"Response/R101\",\n \"Look at this\",\n \"Comment/And at this\",\n ],\n ch_names=[(), (), (), (\"EEG 001\",), (\"EEG 001\", \"EEG 002\")],\n orig_time=orig_time,\n )\n raw.set_annotations(annots)\n\n temp_fname = tmp_path / (\"test\" + ext)\n with pytest.warns(RuntimeWarning, match=\"'short' format. Converting\"):\n raw.export(temp_fname)\n raw_read = read_raw_brainvision(str(temp_fname).replace(\".eeg\", \".vhdr\"))\n assert raw.ch_names == raw_read.ch_names\n assert_allclose(raw.times, raw_read.times)\n assert_allclose(raw.get_data(), raw_read.get_data())", "def _CreateRaw_T(data, y = None):\r\n ch_names = [\"Fp1\", \"Fp2\", \"F7\", \"F3\", \"Fz\", \"F4\", \"F8\", \"T7\", \"C3\", \"Cz\", \"C4\", \"T8\", \"P7\", \"P3\", \"Pz\", \"P4\", \"P8\",\r\n \"O1\", \"O2\"]\r\n # ch_names=[\"FP1\", \"FP2\", \"F7\", \"F3\", \"FZ\", \"F4\", \"F8\", \"T3\", \"C3\", \"CZ\", \"C4\", \"T4\", \"T5\", \"P3\", \"PZ\", \"P4\", \"T6\", \"O1\", \"O2\"]\r\n ch_types = ['eeg'] * len(ch_names)\r\n sfreq = 128\r\n montage = 'standard_1020'\r\n info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types, montage=montage, verbose=False)\r\n if data.ndim > 2:\r\n if (y is None):\r\n events = None\r\n event_id = None\r\n else:\r\n timestamps = np.arange(0, data.shape[0] * (data.shape[2]), data.shape[2]) # artifical timestamps\r\n events = np.concatenate((timestamps.reshape(-1, 1),\r\n np.zeros(timestamps.shape).astype(int).reshape(-1, 1),\r\n y.reshape(-1, 1)), axis=1)\r\n event_id = {\"control\": 0, \"tinnitus\": 1}\r\n raw = mne.EpochsArray(data, info, events=events, event_id=event_id, verbose=False)\r\n else:\r\n raw = mne.io.RawArray(data, info, verbose=False)\r\n return raw", "def gru_seq2seq_internal(inputs, targets, hparams, train):\n with tf.variable_scope(\"gru_seq2seq\"):\n if inputs is not None:\n inputs_length = common_layers.length_from_embedding(inputs)\n # Flatten inputs.\n inputs = common_layers.flatten4d3d(inputs)\n inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)\n _, final_encoder_state = gru(inputs, inputs_length, hparams, train,\n \"encoder\")\n else:\n final_encoder_state = None\n\n shifted_targets = common_layers.shift_right(targets)\n # Add 1 to account for the padding added to the 
left from shift_right\n targets_length = common_layers.length_from_embedding(shifted_targets) + 1\n decoder_outputs, _ = gru(\n common_layers.flatten4d3d(shifted_targets),\n targets_length,\n hparams,\n train,\n \"decoder\",\n initial_state=final_encoder_state)\n return tf.expand_dims(decoder_outputs, axis=2)", "def test_gsm(self):\n duration = 1\n num_channels = 1\n sample_rate = 8000\n path = self.get_temp_path(\"data.gsm\")\n sox_utils.gen_audio_file(path, sample_rate=sample_rate, num_channels=num_channels, duration=duration)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_channels == num_channels\n assert info.bits_per_sample == 0\n assert info.encoding == \"GSM\"", "def rrsigtime2string(rrsig_time):\n return time.strftime(\"%Y-%m-%d-%H:%M\", time.gmtime(rrsig_time))", "def complete_gst(gst_in, gga_in):\n\n gst_out = gst_in.split(',')\n# gga_in = gga_in.split(',')\n# hdop = np.float(gga_in[8])\n# hdop = 0.05\n q=1\n gst_out[2] = str(0.006*q)\n gst_out[3] = str(float(gst_out[6])*q)\n gst_out[4] = str(float(gst_out[7])*q)\n gst_out[6] = str(float(gst_out[6])*q)\n gst_out[7] = str(float(gst_out[7])*q)\n gst_out[5] = str(270.0)\n gst = ','.join(gst_out)\n \n # Apply new checksum :\n gst = gst[:-4] + checksum(gst) + gst[-2:]\n\n return gst", "def wrap_encode(train_test_tuple):\n train, test = train_test_tuple\n scaler = StandardScaler()\n log_amount_train = np.log10(.1 + train['Amount']).values.reshape(-1, 1)\n train['normalized_Amount'] = scaler.fit_transform(log_amount_train)\n log_amount_test = np.log10(.1 + test['Amount']).values.reshape(-1, 1)\n test['normalized_Amount'] = scaler.transform(log_amount_test)\n\n train = train.drop(['Time', 'Amount'], axis=1)\n x_train = train.drop('Class', axis=1)\n\n test = test.drop(['Time', 'Amount'], axis=1)\n x_test = test.drop('Class', axis=1)\n\n autoencoder = fit_autoencoder(x_train)\n\n encoded_x_test = autoencoder.encoder(np.asarray(x_test)).numpy()\n\n new_encoded_test = pd.DataFrame(encoded_x_test)\n new_encoded_test['y'] = test['Class'].values\n return new_encoded_test", "def ComputeGMST(GPSTime):\n\n # Difference in Julian Date between GPSTime and the J2000.0 epoch\n # Subtract half a day as Julian Days start at noon\n D = np.round((GPSTime - EpochJ2000_0_UTC)/secPerDay) - 0.5\n\n # Number of Julian centuries since J2000.0 epoch\n T = D/36525\n\n # 1st approximation to GMST without corrections for leap seconds\n GMST0 = 6.697374558 + 2400.051336*T + 0.000025862*T*T\n # Keep within range [0,24]hrs\n GMST0 = np.mod(GMST0,24)\n\n # Corrections for leap seconds\n UTCSec = GPSTime - EpochJ2000_0_UTC - secPerDay/2 - \\\n LeapSeconds_2012_EpochJ2000\n UTCHr = np.mod(UTCSec/3600,24)\n\n # Add corrections and keep within [0,24]hr range\n GMST = GMST0 + UTCHr*1.002737909\n GMST = np.mod(GMST,24)\n\n # Convert from hours to degrees to radians\n GMST *= 15.*(np.pi/180.)\n\n return GMST", "def test_encode():", "def test_encode():\n enig = Enigma(534, 16, 8, [4, 6, 0, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5])\n string = \"\"\"Hello, this is a test string. I will follow this with a return\nbringing it onto a new line. I can do this forever, but I won't. 
Just\nfor a while.\"\"\"\n encoded = \"\"\"-)m>&)IKp[1`Sro$82[@_`TV&`f%}|<]a1R*\\W4IEb6j@+':`R[.(1$vV4rTJ2\n6V?5.;8q r%0p@+[Ir7-?rzIl;nV<4W7,PD[5-?;RE+~vR5-`i}>=z@S \"eJ`8g:S:1ir\nE0=<F0~/;6).\"\"\"\n\n assert_equal(encoded, enig.encode(string))\n\n endsettings = [5, 2, 2, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5]\n assert_equal(endsettings, enig.getrotsettings())", "def tgn_gnn(\n time_embedding_size: int,\n dropout: float,\n input: tf.Tensor,\n last_update: tf.Tensor,\n edge_idx: tf.Tensor,\n edge_times: tf.Tensor,\n edge_features: tf.Tensor,\n) -> tf.Tensor:\n n_nodes, n_features = assert_shape(input, (None, None))\n assert_shape(last_update, (n_nodes, ))\n _, n_edges = assert_shape(edge_idx, (2, None))\n assert_shape(edge_times, (n_edges, ))\n assert_shape(edge_features, (n_edges, None))\n\n dt = tf.gather(last_update, edge_idx[0]) - edge_times\n time_encoding = time_encoder(tf.cast(dt, tf.float32), time_embedding_size,\n input.dtype)\n return transformer_conv(\n int(n_features),\n n_heads=2,\n dropout=dropout,\n nodes=input,\n edge_idx=edge_idx,\n edges=tf.concat([edge_features, time_encoding], axis=1),\n )", "def test_gbt_with_sio_tf_and_zpk(self):\n # State space coefficients for the continuous SIO system.\n A = -1.0\n B = 1.0\n C = 1.0\n D = 0.5\n\n # The continuous transfer function coefficients.\n cnum, cden = ss2tf(A, B, C, D)\n\n # Continuous zpk representation\n cz, cp, ck = ss2zpk(A, B, C, D)\n\n h = 1.0\n alpha = 0.25\n\n # Explicit formulas, in the scalar case.\n Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A)\n Bd = h * B / (1 - alpha * h * A)\n Cd = C / (1 - alpha * h * A)\n Dd = D + alpha * C * Bd\n\n # Convert the explicit solution to tf\n dnum, dden = ss2tf(Ad, Bd, Cd, Dd)\n\n # Compute the discrete tf using cont2discrete.\n c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha)\n\n assert_allclose(dnum, c2dnum)\n assert_allclose(dden, c2dden)\n\n # Convert explicit solution to zpk.\n dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd)\n\n # Compute the discrete zpk using cont2discrete.\n c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha)\n\n assert_allclose(dz, c2dz)\n assert_allclose(dp, c2dp)\n assert_allclose(dk, c2dk)", "def grmt(genotype_file, alpha, beta, n_iters, n_threads):\n\n outfile = os.path.splitext(genotype_file)[0]\n\n tsc.settings.verbosity = \"info\"\n tsc.settings.logfile = f\"{outfile}.grmt.log\"\n\n df_in = tsc.io.read(genotype_file)\n df_out = tsc.tl.grmt(\n df_in, alpha=alpha, beta=beta, n_iters=n_iters, n_threads=n_threads\n )\n tsc.io.write(df_out, f\"{outfile}.grmt.CFMatrix\")\n\n return None", "def test_golay_matches_old_code(self):\r\n NT_TO_BITS = {\"A\": \"11\", \"C\": \"00\", \"T\": \"10\", \"G\": \"01\"}\r\n original = 'GCATCGTCAACA'\r\n rec = 'GCATCGTCCACA'\r\n corr, nt_errs = golay.decode(rec, NT_TO_BITS)\r\n self.assertEqual(corr, original)\r\n self.assertEqual(nt_errs, 2)", "def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(num_gpus=1)\n self._run_benchmark(params)", "def write_tcv(self):\n suffix = '_'+str(self.shot)+'_'+str(int(self.t*1e3))\n self.write_input(suffix=suffix)", "def test_no_backg_subt():\n \n test_object = fa.read_in_envision(data_csv=HsHis6_PEX5C_vs_HsPEX5C, platemap_csv=Hs_His6_PEX5C_vs_HsPEX5C_platemap, data_type='plate', size=384)\n test_object.calculate_r_i(correct=True, plot_i=False, thr=80)", "def long_training_symbol() -> np.ndarray:\n carriers = [0 + 0j] * 64\n carriers[-32] = 0\n carriers[-31] = 0\n carriers[-30] = 0\n carriers[-29] = 0\n 
carriers[-28] = 0\n carriers[-27] = 0\n carriers[-26] = 1\n carriers[-25] = 1\n carriers[-24] = -1\n carriers[-23] = -1\n carriers[-22] = 1\n carriers[-21] = 1\n carriers[-20] = -1\n carriers[-19] = 1\n carriers[-18] = -1\n carriers[-17] = 1\n carriers[-16] = 1\n carriers[-15] = 1\n carriers[-14] = 1\n carriers[-13] = 1\n carriers[-12] = 1\n carriers[-11] = -1\n carriers[-10] = -1\n carriers[-9] = 1\n carriers[-8] = 1\n carriers[-7] = -1\n carriers[-6] = 1\n carriers[-5] = -1\n carriers[-4] = 1\n carriers[-3] = 1\n carriers[-2] = 1\n carriers[-1] = 1\n carriers[0] = 0\n carriers[1] = 1\n carriers[2] = -1\n carriers[3] = -1\n carriers[4] = 1\n carriers[5] = 1\n carriers[6] = -1\n carriers[7] = 1\n carriers[8] = -1\n carriers[9] = 1\n carriers[10] = -1\n carriers[11] = -1\n carriers[12] = -1\n carriers[13] = -1\n carriers[14] = -1\n carriers[15] = 1\n carriers[16] = 1\n carriers[17] = -1\n carriers[18] = -1\n carriers[19] = 1\n carriers[20] = -1\n carriers[21] = 1\n carriers[22] = -1\n carriers[23] = 1\n carriers[24] = 1\n carriers[25] = 1\n carriers[26] = 1\n carriers[27] = 0\n carriers[28] = 0\n carriers[29] = 0\n carriers[30] = 0\n carriers[31] = 0\n return np.array(carriers)", "def test_encode(self):\n pass # TODO(tlarsen)", "def test_active_inference_SPM_1b(self):", "def short_training_symbol() -> np.ndarray:\n carriers = [0 + 0j] * 64\n carriers[-32] = 0\n carriers[-31] = 0\n carriers[-30] = 0\n carriers[-29] = 0\n carriers[-28] = 0\n carriers[-27] = 0\n carriers[-26] = 0\n carriers[-25] = 0\n carriers[-24] = 1 + 1j\n carriers[-23] = 0\n carriers[-22] = 0\n carriers[-21] = 0\n carriers[-20] = -1 - 1j\n carriers[-19] = 0\n carriers[-18] = 0\n carriers[-17] = 0\n carriers[-16] = 1 + 1j\n carriers[-15] = 0\n carriers[-14] = 0\n carriers[-13] = 0\n carriers[-12] = -1 - 1j\n carriers[-11] = 0\n carriers[-10] = 0\n carriers[-9] = 0\n carriers[-8] = -1 - 1j\n carriers[-7] = 0\n carriers[-6] = 0\n carriers[-5] = 0\n carriers[-4] = 1 + 1j\n carriers[-3] = 0\n carriers[-2] = 0\n carriers[-1] = 0\n carriers[0] = 0\n carriers[1] = 0\n carriers[2] = 0\n carriers[3] = 0\n carriers[4] = -1 - 1j\n carriers[5] = 0\n carriers[6] = 0\n carriers[7] = 0\n carriers[8] = -1 - 1j\n carriers[9] = 0\n carriers[10] = 0\n carriers[11] = 0\n carriers[12] = 1 + 1j\n carriers[13] = 0\n carriers[14] = 0\n carriers[15] = 0\n carriers[16] = 1 + 1j\n carriers[17] = 0\n carriers[18] = 0\n carriers[19] = 0\n carriers[20] = 1 + 1j\n carriers[21] = 0\n carriers[22] = 0\n carriers[23] = 0\n carriers[24] = 1 + 1j\n carriers[25] = 0\n carriers[26] = 0\n carriers[27] = 0\n carriers[28] = 0\n carriers[29] = 0\n carriers[30] = 0\n carriers[31] = 0\n return np.array(carriers) * np.sqrt(13 / 6)", "def test_dx10_r8g8b8a8_unorm_srgb():\n\n with Image.open(TEST_FILE_DX10_R8G8B8A8_UNORM_SRGB) as im:\n im.load()\n\n assert im.format == \"DDS\"\n assert im.mode == \"RGBA\"\n assert im.size == (16, 16)\n assert im.info[\"gamma\"] == 1 / 2.2\n\n assert_image_equal_tofile(\n im, TEST_FILE_DX10_R8G8B8A8_UNORM_SRGB.replace(\".dds\", \".png\")\n )", "def test_Encoder_encode_decode_nack(self):\n interest = Interest(\"/data/test\")\n n = Nack(\"/data/test\", NackReason.NO_CONTENT, interest=interest)\n en = self.encoder1.encode(n)\n dn = self.encoder1.decode(en)\n self.assertTrue(n == dn)", "def test_tdg_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_non_clifford.tdg_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_non_clifford.tdg_gate_counts_nondeterministic(shots)\n job = execute(circuits, 
QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def utest_SGD_Test():\n model_fname = \"../work/model\"\n # test binary classification.\n if False:\n #test_fname = \"../work/train.bz2\"\n test_fname = \"../work/rcv1_test.binary.bz2\"\n if True:\n test_fname = \"../work/iris_multi.train\"\n test_logreg(model_fname,test_fname,prob=True,acc=True)\n pass", "def test_get_integration_time_vals():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n inttime_array = utils.get_integration_time(test_uv, reds=baseline_array)\n test_shape = (test_uv.Nbls, test_uv.Ntimes)\n test_array = test_uv.integration_time.copy()\n test_array = test_array.reshape(test_shape)\n assert np.allclose(test_array, inttime_array)", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def _ub_to_gm_one(args):\n tik_instance, dst, data_res, dst_offset, res_offset, ori_nburst, \\\n burst_len, src_stride, dst_stride, cp_align_len = args\n\n if dst_stride <= 65535:\n if ori_nburst <= 4095:\n tik_instance.data_move(\n dst[dst_offset],\n data_res[res_offset],\n 0, ori_nburst, burst_len,\n src_stride, dst_stride)\n\n else:\n n_burst = 4095\n c_cycle = ori_nburst // n_burst\n c_mod = ori_nburst % n_burst\n\n for num_cy in range(c_cycle):\n dst_cur = dst_offset + (burst_len + dst_stride)\\\n * cp_align_len * n_burst * num_cy\n res_cur = res_offset + (burst_len + src_stride)\\\n * cp_align_len * n_burst * num_cy\n\n tik_instance.data_move(\n dst[dst_cur],\n data_res[res_cur],\n 0, n_burst, burst_len,\n src_stride, dst_stride)\n\n if c_mod > 0:\n dst_cur = dst_offset + (burst_len + dst_stride)\\\n * cp_align_len * n_burst * c_cycle\n res_cur = res_offset + (burst_len + src_stride)\\\n * cp_align_len * n_burst * c_cycle\n\n tik_instance.data_move(\n dst[dst_cur],\n data_res[res_cur],\n 0, c_mod, burst_len,\n src_stride, dst_stride)\n\n else:\n for num_nb in range(ori_nburst):\n dst_cur = dst_offset + (burst_len + dst_stride)\\\n * cp_align_len * num_nb\n res_cur = res_offset + (burst_len + src_stride)\\\n * cp_align_len * num_nb\n\n tik_instance.data_move(\n dst[dst_cur],\n data_res[res_cur],\n 0, 1, burst_len,\n 0, 0)", "def test_bti_ecg_eog_emg(monkeypatch):\n kwargs = dict(rename_channels=False, head_shape_fname=None)\n raw = read_raw_bti(fname_2500, **kwargs)\n ch_types = raw.get_channel_types()\n got = Counter(ch_types)\n # Before improving the triaging in gh-, these values were:\n # want = dict(mag=148, ref_meg=11, ecg=32, stim=2, misc=1)\n want = dict(mag=148, ref_meg=11, ecg=1, stim=2, misc=1, eeg=31)\n assert set(want) == set(got)\n for key in want:\n assert want[key] == got[key], key\n\n # replace channel names with some from HCP (starting from the end)\n # not including UACurrent (misc) or TRIGGER/RESPONSE (stim) b/c they\n # already exist\n got_map = dict(zip(raw.ch_names, ch_types))\n kind_map = dict(\n stim=[\"TRIGGER\", \"RESPONSE\"],\n misc=[\"UACurrent\"],\n )\n for kind, ch_names in kind_map.items():\n for ch_name in ch_names:\n assert got_map[ch_name] == kind\n kind_map = dict(\n misc=[\"SA1\", \"SA2\", \"SA3\"],\n ecg=[\"ECG+\", \"ECG-\"],\n eog=[\"VEOG+\", \"HEOG+\", \"VEOG-\", \"HEOG-\"],\n emg=[\"EMG_LF\", \"EMG_LH\", \"EMG_RF\", \"EMG_RH\"],\n )\n new_names = sum(kind_map.values(), list())\n assert len(new_names) == 13\n assert 
set(new_names).intersection(set(raw.ch_names)) == set()\n\n def _read_bti_header_2(*args, **kwargs):\n bti_info = _read_bti_header(*args, **kwargs)\n for ch_name, ch in zip(new_names, bti_info[\"chs\"][::-1]):\n ch[\"chan_label\"] = ch_name\n return bti_info\n\n monkeypatch.setattr(mne.io.bti.bti, \"_read_bti_header\", _read_bti_header_2)\n raw = read_raw_bti(fname_2500, **kwargs)\n got_map = dict(zip(raw.ch_names, raw.get_channel_types()))\n got = Counter(got_map.values())\n want = dict(mag=148, ref_meg=11, misc=1, stim=2, eeg=19)\n for kind, ch_names in kind_map.items():\n want[kind] = want.get(kind, 0) + len(ch_names)\n assert set(want) == set(got)\n for key in want:\n assert want[key] == got[key], key\n for kind, ch_names in kind_map.items():\n for ch_name in ch_names:\n assert ch_name in raw.ch_names\n err_msg = f\"{ch_name} type {got_map[ch_name]} !+ {kind}\"\n assert got_map[ch_name] == kind, err_msg", "def test_bits_per_sample(self):\n test_bits_per_sample = 24\n self.encoder._bits_per_sample = test_bits_per_sample\n self.assertEqual(self.encoder._bits_per_sample, test_bits_per_sample)", "def init_ntp_pck(num_of_digits_to_fill_up: int = 12) -> NTP:\n ntp = NTP()\n ntp.ref = ntp_time_now()\n ntp.sent = ntp_time_now()\n ntp.orig = ntp_time_now()\n ntp.recv = ntp_time_now()\n raw_ntp = RawNTP(ntp)\n\n f_ref = raw_ntp.reference_timestamp()\n f_trans = raw_ntp.transmit_timestamp()\n f_orig = raw_ntp.origin_timestamp()\n f_recv = raw_ntp.receive_timestamp()\n\n for i in range(num_of_digits_to_fill_up):\n pos = 64 - i\n f_ref = f_ref[:pos - 1] + str(random.randint(0, 1)) + f_ref[pos:]\n f_trans = f_trans[:pos - 1] + str(random.randint(0, 1)) + f_trans[pos:]\n f_orig = f_orig[:pos - 1] + str(random.randint(0, 1)) + f_orig[pos:]\n f_recv = f_recv[:pos - 1] + str(random.randint(0, 1)) + f_recv[pos:]\n\n assert len(f_ref) == 64\n assert len(f_trans) == 64\n assert len(f_orig) == 64\n assert len(f_recv) == 64\n\n raw_ntp.set_reference_timestamp(f_ref)\n raw_ntp.set_transmit_timestamp(f_trans)\n raw_ntp.set_origin_timestamp(f_orig)\n raw_ntp.set_receive_timestamp(f_recv)\n ntp = raw_ntp.ntp()\n return ntp", "def runit():\n print encode(\"no\")\n\n print encode(\"yes\")\n\n print encode(\"OMG\")\n\n print encode(\"O M G\")\n\n print encode(\"mindblowingly\")\n\n print encode(\"Testing, 1 2 3, testing.\")\n\n print encode(\"Truth is fiction.\")\n\n print encode(\"The quick brown fox jumps over the lazy dog.\")\n\n print decode(\"vcvix rhn\")\n\n print decode(\"zmlyh gzxov rhlug vmzhg vkkrm thglm v\")", "def encoder_test(\n encoder,\n input_data,\n regularizer,\n dropout_rate,\n output_dtype,\n output_shape,\n output_data=None,\n):\n tf.reset_default_graph()\n\n # Run the encoder\n input_data = tf.convert_to_tensor(input_data)\n dropout_rate = tf.convert_to_tensor(dropout_rate)\n is_training = tf.convert_to_tensor(False)\n\n hidden, _ = encoder(\n input_data,\n regularizer,\n dropout_rate,\n is_training=is_training\n )\n\n # Check output shape and type\n assert hidden.dtype == output_dtype\n assert hidden.shape.as_list() == output_shape\n\n if output_data is not None:\n # TODO the hidden output is actually a tensor. 
May need modification\n assert np.allclose(hidden, output_data)", "def _encode(self, input_dict):\n\n source_sequence, src_length = input_dict['source_tensors']\n\n training = (self._mode == \"train\")\n dropout_keep_prob = self.params['dropout_keep_prob'] if training else 1.0\n regularizer = self.params.get('regularizer', None)\n data_format = self.params.get('data_format', 'channels_last')\n bn_momentum = self.params.get('bn_momentum', 0.99)\n bn_epsilon = self.params.get('bn_epsilon', 1e-3)\n\n input_layer = tf.expand_dims(source_sequence, axis=-1) # BTFC\n # print(\"<<< input :\", input_layer.get_shape().as_list())\n\n batch_size = input_layer.get_shape().as_list()[0]\n freq = input_layer.get_shape().as_list()[2]\n\n # supported data_formats:\n # BTFC = channel_last (legacy)\n # BCTF = channel_first(legacy)\n # BFTC\n # BCFT\n\n if data_format=='channels_last' or data_format=='BTFC':\n layout = 'BTFC'\n dformat = 'channels_last'\n elif data_format=='channels_first' or data_format=='BCTF':\n layout = 'BCTF'\n dformat = 'channels_first'\n elif data_format=='BFTC':\n layout = 'BFTC'\n dformat = 'channels_last'\n elif data_format=='BCFT':\n layout = 'BCFT'\n dformat = 'channels_first'\n else:\n print(\"WARNING: unsupported data format: will use channels_last (BTFC) instead\")\n layout = 'BTFC'\n dformat = 'channels_last'\n\n #input_layer is BTFC\n\n if layout == 'BCTF':\n top_layer = tf.transpose(input_layer, [0, 3, 1, 2])\n elif layout == 'BFTC':\n top_layer = tf.transpose(input_layer, [0, 2, 1, 3])\n elif layout == 'BCFT':\n top_layer = tf.transpose(input_layer, [0, 3, 2, 1])\n else:\n top_layer = input_layer\n\n # print(\"<<< pre-conv:\", top_layer.get_shape().as_list())\n\n # ----- Convolutional layers ---------------------------------------------\n conv_layers = self.params['conv_layers']\n\n for idx_conv in range(len(conv_layers)):\n ch_out = conv_layers[idx_conv]['num_channels']\n kernel_size = conv_layers[idx_conv]['kernel_size'] # [T,F] format\n strides = conv_layers[idx_conv]['stride'] # [T,F] format\n padding = conv_layers[idx_conv]['padding']\n\n if padding == \"VALID\":\n src_length = (src_length - kernel_size[0] + strides[0]) // strides[0]\n freq = (freq - kernel_size[1] + strides[1]) // strides[1]\n else:\n src_length = (src_length + strides[0] - 1) // strides[0]\n freq = (freq + strides[1] -1) // strides[1]\n\n if layout == 'BFTC' or layout == 'BCFT':\n kernel_size = kernel_size[::-1]\n strides = strides[::-1]\n # print(kernel_size, strides)\n\n top_layer = conv_bn_actv(\n layer_type=\"conv2d\",\n name=\"conv{}\".format(idx_conv + 1),\n inputs=top_layer,\n filters=ch_out,\n kernel_size=kernel_size,\n activation_fn=self.params['activation_fn'],\n strides=strides,\n padding=padding,\n regularizer=regularizer,\n training=training,\n data_format=dformat,\n bn_momentum=bn_momentum,\n bn_epsilon=bn_epsilon,\n )\n # print(idx_conv, \"++++\", top_layer.get_shape().as_list())\n\n # convert layout --> BTFC\n # if data_format == 'channels_first':\n # top_layer = tf.transpose(top_layer, [0, 2, 3, 1])\n\n if layout == 'BCTF': # BCTF --> BTFC\n top_layer = tf.transpose(top_layer, [0, 2, 3, 1])\n elif layout == 'BFTC': # BFTC --> BTFC\n top_layer = tf.transpose(top_layer, [0, 2, 1, 3])\n elif layout == 'BCFT': # BCFT --> BTFC\n top_layer = tf.transpose(top_layer, [0, 3, 2, 1])\n\n\n # print(\">>> post-conv:\", top_layer.get_shape().as_list())\n\n # reshape to [B, T, FxC]\n f = top_layer.get_shape().as_list()[2]\n c = top_layer.get_shape().as_list()[3]\n fc = f * c\n top_layer = 
tf.reshape(top_layer, [batch_size, -1, fc])\n\n # ----- RNN ---------------------------------------------------------------\n num_rnn_layers = self.params['num_rnn_layers']\n if num_rnn_layers > 0:\n rnn_cell_dim = self.params['rnn_cell_dim']\n rnn_type = self.params['rnn_type']\n if self.params['use_cudnn_rnn']:\n # reshape to [B, T, C] --> [T, B, C]\n rnn_input = tf.transpose(top_layer, [1, 0, 2])\n if self.params['rnn_unidirectional']:\n direction = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION\n else:\n direction = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION\n\n if rnn_type == \"cudnn_gru\" or rnn_type == \"gru\":\n # pylint: disable=no-member\n rnn_block = tf.contrib.cudnn_rnn.CudnnGRU(\n num_layers=num_rnn_layers,\n num_units=rnn_cell_dim,\n direction=direction,\n dropout=1.0 - dropout_keep_prob,\n dtype=rnn_input.dtype,\n name=\"cudnn_gru\",\n )\n elif rnn_type == \"cudnn_lstm\" or rnn_type == \"lstm\":\n # pylint: disable=no-member\n rnn_block = tf.contrib.cudnn_rnn.CudnnLSTM(\n num_layers=num_rnn_layers,\n num_units=rnn_cell_dim,\n direction=direction,\n dropout=1.0 - dropout_keep_prob,\n dtype=rnn_input.dtype,\n name=\"cudnn_lstm\",\n )\n else:\n raise ValueError(\n \"{} is not a valid rnn_type for cudnn_rnn layers\".format(\n rnn_type)\n )\n top_layer, state = rnn_block(rnn_input)\n top_layer = tf.transpose(top_layer, [1, 0, 2])\n else:\n rnn_input = top_layer\n multirnn_cell_fw = tf.nn.rnn_cell.MultiRNNCell(\n [rnn_cell(rnn_cell_dim=rnn_cell_dim, layer_type=rnn_type,\n dropout_keep_prob=dropout_keep_prob)\n for _ in range(num_rnn_layers)]\n )\n if self.params['rnn_unidirectional']:\n top_layer, state = tf.nn.dynamic_rnn(\n cell=multirnn_cell_fw,\n inputs=rnn_input,\n sequence_length=src_length,\n dtype=rnn_input.dtype,\n time_major=False,\n )\n else:\n multirnn_cell_bw = tf.nn.rnn_cell.MultiRNNCell(\n [rnn_cell(rnn_cell_dim=rnn_cell_dim, layer_type=rnn_type,\n dropout_keep_prob=dropout_keep_prob)\n for _ in range(num_rnn_layers)]\n )\n top_layer, state = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=multirnn_cell_fw, cell_bw=multirnn_cell_bw,\n inputs=rnn_input,\n sequence_length=src_length,\n dtype=rnn_input.dtype,\n time_major=False\n )\n # concat 2 tensors [B, T, n_cell_dim] --> [B, T, 2*n_cell_dim]\n top_layer = tf.concat(top_layer, 2)\n # -- end of rnn------------------------------------------------------------\n\n if self.params['row_conv']:\n channels = top_layer.get_shape().as_list()[-1]\n top_layer = row_conv(\n name=\"row_conv\",\n input_layer=top_layer,\n batch=batch_size,\n channels=channels,\n activation_fn=self.params['activation_fn'],\n width=self.params['row_conv_width'],\n regularizer=regularizer,\n training=training,\n data_format=data_format,\n bn_momentum=bn_momentum,\n bn_epsilon=bn_epsilon,\n )\n\n # Reshape [B, T, C] --> [B*T, C]\n c = top_layer.get_shape().as_list()[-1]\n top_layer = tf.reshape(top_layer, [-1, c])\n\n # --- hidden layer with clipped ReLU activation and dropout---------------\n top_layer = tf.layers.dense(\n inputs=top_layer,\n units=self.params['n_hidden'],\n kernel_regularizer=regularizer,\n activation=self.params['activation_fn'],\n name='fully_connected',\n )\n outputs = tf.nn.dropout(x=top_layer, keep_prob=dropout_keep_prob)\n\n # reshape from [B*T,A] --> [B, T, A].\n # Output shape: [batch_size, n_steps, n_hidden]\n outputs = tf.reshape(\n outputs,\n [batch_size, -1, self.params['n_hidden']],\n )\n\n return {\n 'outputs': outputs,\n 'src_length': src_length,\n }", "def _write_goft(parameters):\n # Format\n fmt = block_to_format[\"GOFT\"]\n fmt = 
str2format(fmt[5])\n\n values = [x for x in parameters[\"generator_history\"]]\n out = write_record(values, fmt, multi=True)\n\n return out", "def ca_gtrel_model(pars, solver_options,\n recompile=0):\n support_code = \"\"\"\n #include \"gliotransmission_models.h\"\n \"\"\"\n source_files = [os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/pycapi_utils.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/solver_options.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/stochastic_solvers.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir + '/code/gliotransmission_models.cpp')]\n code = \"\"\"\n // Version\n double version = 0.0;\n\n // Define astrocyte model\n gtrelease gliot;\n\n // Declare output structure\n out_gtrelease out;\n\n // Simulator\n out = gliot.simulate(pars,solver_options);\n\n //Output \n return_val = out.make_PyDict();\n \"\"\"\n libs = ['gsl', 'gslcblas', 'm']\n dirs = [os.path.join(os.path.expanduser('~'), base_dir + '/code/'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers')]\n vars = ['pars', 'solver_options']\n gtr = weave.inline(code,\n vars,\n support_code=support_code,\n sources=source_files,\n libraries=libs,\n library_dirs=dirs,\n include_dirs=dirs,\n runtime_library_dirs=dirs,\n type_converters=converters.blitz,\n compiler='gcc',\n extra_compile_args=['-std=c++11'],\n force=recompile)\n # Post-stimulus processing\n gtr['twin'] = np.asarray([solver_options['t0'],solver_options['tfin']],dtype=float)\n gtr['twin_gtr'] = np.asarray([solver_options['t0'], solver_options['tfin']], dtype=float) # Time window used in the reconstruction\n # Clean 'x' and provide GRE vector\n i_gre = gtr['xa']>0\n gtr['xa'] = gtr['xa'][i_gre]\n gtr['gre'] = gtr['t'][i_gre]\n # A vector with all the indexes of Gt. CONVENTION: we use negative indexes for astrocytic release. Only one release\n # site in this implementation, i.e. 
index -1\n gtr['ig'] = -1*np.ones(len(gtr['gre']))\n # Add released GTRs\n gtr['ra'] = pars['ua']*gtr['xa']\n\n return gtr", "def RunAutoEncoder(net, criterion, optimizer, lr_scheduler, train_dl, train_len, test_dl, test_len, N_EPOCHS, outputPath, SAVE_FILE,\\\n DO_PROJ_middle, run_model, criterion_classification, LOSS_LAMBDA, feature_name, TYPE_PROJ, ETA, ETA_STAR=100, AXIS=0 ):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n epoch_loss, epoch_acc, epoch_reconstruction, epoch_classification, train_time = [], [], [], [], []\n epoch_val_loss, epoch_val_acc, epoch_val_reconstruction, epoch_val_classification = [], [], [], []\n best_test = 0 \n for e in range(N_EPOCHS):\n t1 = time.perf_counter()\n\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.train()\n for i,batch in enumerate(tqdm(train_dl)):\n x = batch[0]\n labels = batch[1]\n \n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda() \n \n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n \n optimizer.zero_grad()\n loss.backward()\n \n # Set the gradient as 0\n if run_model =='MaskGrad':\n for index,param in enumerate(list(net.parameters())):\n if index<len(list(net.parameters()))/2-2 and index%2==0:\n param.grad[ DO_PROJ_middle[int(index/2)] ] =0 \n optimizer.step() \n \n with torch.no_grad():\n running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n \n if e == N_EPOCHS-1 :\n# labels = encoder_out.max(1)[1].float()\n if i == 0:\n data_decoded = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n else:\n tmp1 = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_decoded = torch.cat((data_decoded,tmp1),dim= 0)\n \n tmp2 = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((data_encoder,tmp2 ),dim= 0)\n\n t2 = time.perf_counter()\n train_time.append(t2-t1)\n print(\"Total loss:\", running_loss / float(train_len ),'loss_reconstruction: ', running_reconstruction/ train_len ,\\\n 'loss_classification: ',running_classification/ train_len ) \n epoch_loss.append(running_loss / train_len )\n epoch_reconstruction.append( running_reconstruction / train_len )\n epoch_classification.append( running_classification / train_len )\n epoch_acc.append(running_accuracy / train_len)\n \n \n # Do projection at last epoch (GRADIENT_MASK)\n if run_model=='ProjectionLastEpoch' and e==(N_EPOCHS-1):\n net_parameters = list(net.parameters())\n for index,param in enumerate(net_parameters):\n if DO_PROJ_middle == False and \\\n index!= len(net_parameters)/2-2: # Do no projection at middle layer\n param.data = Projection(param.data, TYPE_PROJ, ETA, ETA_STAR, AXIS, device).to(device)\n \n #testing our model\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.eval()\n \n for i,batch in enumerate(tqdm(test_dl)):\n with torch.no_grad():\n x = batch[0]\n labels = batch[1]\n if torch.cuda.is_available():\n x = x.cuda()\n labels = 
labels.cuda()\n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n print(\"test accuracy : \", running_accuracy / test_len, \"Total loss:\", running_loss / float(test_len ),'loss_reconstruction: ', running_reconstruction/ test_len ,\\\n 'loss_classification: ',running_classification/ test_len )\n if running_accuracy > best_test :\n best_net_it = e\n best_test = running_accuracy\n torch.save(net.state_dict(), str(outputPath)+\"/best_net\")\n epoch_val_loss.append(running_loss / test_len )\n epoch_val_reconstruction.append( running_reconstruction / test_len )\n epoch_val_classification.append( running_classification / test_len )\n epoch_val_acc.append(running_accuracy / test_len) \n \n print('Epoch du best net = ', best_net_it) \n if SAVE_FILE and str(run_model)!= 'ProjectionLastEpoch':\n # Save encoder data\n Lung_encoder = data_encoder.cpu().detach().numpy()\n colunms = [x for x in range(Lung_encoder.shape[1]-1)] +['label']\n res =pd.DataFrame(Lung_encoder,columns= colunms)\n #res.to_csv('{}encoder_tiro_{}.csv'.format(outputPath, str(run_model)),sep=';')\n # Save decoder data\n Lung_decoded = data_decoded.cpu().detach().numpy()\n Label = ['Label']+list(Lung_decoded[:,-1].astype(int)+1)\n Name = ['Name'] + [x+2 for x in range(train_len)]\n Label = np.vstack( (np.array(Name),np.array(Label)) )\n Lung = np.delete(Lung_decoded, -1, axis =1 )\n Lung = np.hstack( (feature_name.reshape(-1,1), Lung.T) )\n Lung = np.vstack((Label, Lung))\n res = pd.DataFrame(Lung)\n #res.to_csv('{}decoded_{}.csv'.format(outputPath, str(run_model)),sep=';',index=0, header=0) \n print(\"-----------------------\")\n print(\"Saved file to \",str(outputPath))\n print(\"-----------------------\")\n #Plot \n if str(run_model)!= 'ProjectionLastEpoch':\n #plt.figure()\n #plt.plot( epoch_acc )\n #plt.plot( epoch_val_acc )\n #plt.title('Total accuracy classification')\n #plt.show()\n print('{} epochs trained for {}s , {} s/epoch'.format(N_EPOCHS, sum(train_time), np.mean(train_time)))\n return data_encoder, data_decoded, epoch_loss , best_test, net", "def test_gtf(self):\n #TODO write bed tests", "def save_gyre(filename, header, data):\n with open(filename, 'wt') as f:\n header_length = len(list(header[()]))\n # if header_length == 4:\n # fmt = ''.join(['%6i','%26.16E'*3,'\\n'])\n # elif header_length == 5:\n # fmt = ''.join(['%6i','%26.16E'*3,'%6i\\n'])\n # else:\n # raise ValueError(\"header should have 4 or 5 components but \"\n # \"it appears to have %i\" % header_length)\n if not 'version' in header.dtype.names:\n fmt = ''.join(['%6i','%26.16E'*3,'\\n'])\n else:\n fmt = ''.join(['%6i','%26.16E'*3,'%6i\\n'])\n\n f.writelines([fmt % tuple(header[()])])\n\n N = len(data[0])-1\n fmt = ''.join(['%6i',' %26.16E'*N,'\\n'])\n for row in data:\n f.writelines([fmt % tuple(row)])", "def test_make_unifrac_metric2(self):\r\n tree = parse_newick(self.l19_treestr, PhyloNode)\r\n unif = make_unifrac_metric(False, unifrac, True)\r\n otu_data = numpy.array([\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0], # sam1 
zeros\r\n [4, 2, 0, 0, 0, 1, 0, 0, 0],\r\n [2, 4, 0, 0, 0, 1, 0, 0, 0],\r\n [1, 7, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 8, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 7, 1, 0, 0, 0, 0, 0, 0],\r\n [0, 4, 2, 0, 0, 0, 2, 0, 0],\r\n [0, 2, 4, 0, 0, 0, 1, 0, 0],\r\n [0, 1, 7, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 8, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 7, 1, 0, 0, 0, 0, 0],\r\n [0, 0, 4, 2, 0, 0, 0, 3, 0],\r\n [0, 0, 2, 4, 0, 0, 0, 1, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0], # sam14 zeros\r\n [0, 0, 0, 8, 0, 0, 0, 0, 0],\r\n [0, 0, 2, 4, 0, 0, 0, 1, 0], # sam 16 now like sam 13\r\n [0, 0, 0, 4, 2, 0, 0, 0, 4],\r\n [0, 0, 0, 2, 4, 0, 0, 0, 1],\r\n [0, 0, 0, 1, 7, 0, 0, 0, 0]\r\n ])\r\n warnings.filterwarnings('ignore')\r\n res = unif(otu_data, self.l19_taxon_names, tree,\r\n self.l19_sample_names)\r\n envs = make_envs_dict(self.l19_data, self.l19_sample_names,\r\n self.l19_taxon_names)\r\n self.assertEqual(res[0, 0], 0)\r\n self.assertEqual(res[0, 13], 0.0)\r\n self.assertEqual(res[12, 15], 0.0)\r\n self.assertEqual(res[0, 1], 1.0)\r\n warnings.resetwarnings()", "def utr(local_dir, cpus, gpus, num_parallel, num_samples):\n\n # Final Version\n\n from design_baselines.mins import mins\n ray.init(num_cpus=cpus,\n num_gpus=gpus,\n include_dashboard=False,\n _temp_dir=os.path.expanduser('~/tmp'))\n tune.run(mins, config={\n \"logging_dir\": \"data\",\n \"task\": \"UTR-ResNet-v0\",\n \"task_kwargs\": {\"relabel\": False},\n \"val_size\": 200,\n \"offline\": True,\n \"normalize_ys\": True,\n \"normalize_xs\": False,\n \"base_temp\": 0.1,\n \"keep\": 0.99,\n \"start_temp\": 5.0,\n \"final_temp\": 1.0,\n \"method\": \"wasserstein\",\n \"use_conv\": False,\n \"gan_batch_size\": 128,\n \"hidden_size\": 1024,\n \"num_layers\": 1,\n \"bootstraps\": 1,\n \"initial_max_std\": 0.2,\n \"initial_min_std\": 0.1,\n \"oracle_lr\": 0.001,\n \"oracle_batch_size\": 128,\n \"oracle_epochs\": 100,\n \"latent_size\": 32,\n \"critic_frequency\": 10,\n \"flip_frac\": 0.,\n \"fake_pair_frac\": 0.0,\n \"penalty_weight\": 10.,\n \"generator_lr\": 2e-4,\n \"generator_beta_1\": 0.0,\n \"generator_beta_2\": 0.9,\n \"discriminator_lr\": 2e-4,\n \"discriminator_beta_1\": 0.0,\n \"discriminator_beta_2\": 0.9,\n \"initial_epochs\": 200,\n \"epochs_per_iteration\": 0,\n \"iterations\": 0,\n \"exploration_samples\": 0,\n \"exploration_rate\": 0.,\n \"thompson_samples\": 0,\n \"solver_samples\": 128, \"do_evaluation\": True},\n num_samples=num_samples,\n local_dir=local_dir,\n resources_per_trial={'cpu': cpus // num_parallel,\n 'gpu': gpus / num_parallel - 0.01})", "def test_encode_webp():\n width = 51\n height = 26\n channels = 3\n bmp_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"test_image\", \"lena.bmp\")\n with open(bmp_file, 'rb') as f:\n bmp_contents = f.read()\n image_v = tf.image.decode_bmp(bmp_contents)\n assert image_v.shape == [height, width, channels]\n bmp_encoded = image_io.encode_bmp(image_v)\n image_e = tf.image.decode_bmp(bmp_encoded)\n assert np.all(image_v.numpy() == image_e.numpy())", "def test_save_geometric(self):\n G = nx.random_geometric_graph(20, 0.1)\n env = Environment(topology=G)\n f = io.BytesIO()\n env.dump_gexf(f)", "def test_the_model(rtol=0.1):\n\n # the model object\n year = 1.\n m = pyqg.SQGModel(L=2.*pi,nx=128, tmax = 10*year,\n beta = 0., H = 1., rek = 0., rd = None, dt = 1.e-3,\n taveint=year, twrite=1000, ntd=1)\n\n # a vortex merger IC with unit energy\n p = np.exp(-(2.*(m.x-1.75*pi/2))**2.-(2.*(m.y-pi))**2) +\\\n np.exp(-(2.*(m.x-2.25*pi/2))**2.-(2.*(m.y-pi))**2)\n\n ph = m.fft(p[np.newaxis,:,:])\n KEaux = 
m.spec_var( m.filtr*m.wv*ph )/2.\n ph = ( ph/np.sqrt(KEaux) )\n qih = m.wv*ph\n qi = m.ifft(qih)\n m.set_q(qi)\n\n m.run()\n\n qnorm = (m.q**2).sum()\n mp = m.ifft(m.ph)\n pnorm = (mp**2).sum()\n ke = m._calc_ke()\n\n print 'time: %g' % m.t\n assert m.t == 10.000999999999896\n \n np.testing.assert_allclose(qnorm, 31517.690603406099, rtol)\n np.testing.assert_allclose(pnorm, 5398.52096250875, rtol)\n np.testing.assert_allclose(ke, 0.96184358530902392, rtol)", "def test_rmg_mode(self):\n self.assertEqual(self.rmgmode, False)", "def testSimpleTrendDGP(self):\n N1, N0_sim, N0_not = 1, 50, 50\n N0 = N0_sim + N0_not\n N = N1 + N0\n treated_units, control_units = range(N1), range(N1, N)\n T0, T1 = 2, 1\n T = T0 + T1 # unused\n proto_sim = np.array([1, 0] + [2], ndmin=2)\n proto_not = np.array([0, 1] + [1], ndmin=2)\n te = 2\n proto_tr = proto_sim + np.hstack((np.zeros((1, T0)), np.full((1, T1), te)))\n Y1 = np.matmul(np.ones((N1, 1)), proto_tr)\n Y0_sim = np.matmul(np.ones((N0_sim, 1)), proto_sim)\n Y0_sim = Y0_sim + np.random.normal(0,0.1,Y0_sim.shape)\n #Y0_sim = Y0_sim + np.hstack((np.zeros((N0_sim,1)), \n # np.random.normal(0,0.1,(N0_sim,1)),\n # np.zeros((N0_sim,T-2))))\n Y0_not = np.matmul(np.ones((N0_not, 1)), proto_not)\n Y0_not = Y0_not + np.random.normal(0,0.1,Y0_not.shape)\n Y = np.vstack((Y1, Y0_sim, Y0_not))\n\n unit_treatment_periods = np.full((N), -1)\n unit_treatment_periods[0] = T0\n\n # Y += np.random.normal(0, 0.01, Y.shape)\n\n # OPTIMIZE OVER THE V_PEN'S\n # for v_pen, w_pen in [(1,1), (1,1e-10), (1e-10,1e-10), (1e-10,1), (None, None)]: #\n # print(\"\\nv_pen=%s, w_pen=%s\" % (v_pen, w_pen))\n ret = SC.estimate_effects(\n Y,\n unit_treatment_periods,\n ret_CI=True,\n max_n_pl=200,\n #stopping_rule=4,\n **command_line_options,\n )\n TestDGPs.simple_summ(ret.fits[T0], Y)\n V_penalty = ret.fits[T0].fitted_v_pen\n\n Y_sc = ret.fits[T0].predict(Y)# [control_units, :]\n te_vec_est = (Y - Y_sc)[0:T0:]\n # weight_sums = np.sum(ret.fit.sc_weights, axis=1)\n\n # print(ret.fit.scores)\n p_value = ret.p_value\n #print(\"p-value: %s\" % p_value)\n #print( ret.CI)\n #print(np.diag(ret.fit.V))\n #import pdb; pdb.set_trace()\n # print(ret)\n assert te in ret.CI, \"Confidence interval does not include the true effect\"\n assert p_value is not None\n assert p_value < 0.1, \"P-value is larger than expected\"\n\n # [sc_raw, sc_diff] = ind_sc_plots(Y[0, :], Y_sc[0, :], T0, ind_ci=ret.ind_CI)\n # plt.figure(\"sc_raw\")\n # plt.title(\"Unit 0\")\n # ### SHOW() blocks!!!!\n # # plt.show()\n # plt.figure(\"sc_diff\")\n # plt.title(\"Unit 0\")\n # # plt.show()\n # [te] = te_plot(ret)\n # plt.figure(\"te\")\n # plt.title(\"Average Treatment Effect\")\n # # plt.show()", "def tpm3_1_8_end_genomic():\n return \"TPM3\", \"NC_000001.11\", 154170399, 154170469, -1", "def gru_seq2seq_internal_bid_encoder(inputs, targets, hparams, train):\n with tf.variable_scope(\"gru_seq2seq_bid_encoder\"):\n if inputs is not None:\n inputs_length = common_layers.length_from_embedding(inputs)\n # Flatten inputs.\n inputs = common_layers.flatten4d3d(inputs)\n # LSTM encoder.\n _, final_encoder_state = gru_bid_encoder(\n inputs, inputs_length, hparams, train, \"encoder\")\n else:\n inputs_length = None\n final_encoder_state = None\n # LSTM decoder.\n shifted_targets = common_layers.shift_right(targets)\n # Add 1 to account for the padding added to the left from shift_right\n targets_length = common_layers.length_from_embedding(shifted_targets) + 1\n hparams_decoder = copy.copy(hparams)\n hparams_decoder.hidden_size = 2 * 
hparams.hidden_size\n decoder_outputs, _ = gru(\n common_layers.flatten4d3d(shifted_targets),\n targets_length,\n hparams_decoder,\n train,\n \"decoder\",\n initial_state=final_encoder_state)\n return tf.expand_dims(decoder_outputs, axis=2)", "def theta_v_time():\n pass", "def test_golay600_4bit_errors(self):\r\n for bc in golay600:\r\n if bc.count('A') < 2:\r\n continue # only check those with A's\r\n err_bc = bc.replace('A', 'C', 2)\r\n corr, num_errs = golay.decode(err_bc)\r\n self.assertEqual(corr, None)\r\n self.assertEqual(num_errs, 4)\r\n\r\n for bc in golay600:\r\n if bc.count('T') < 2:\r\n continue # only check those with A's\r\n err_bc = bc.replace('T', 'G', 2)\r\n corr, num_errs = golay.decode(err_bc)\r\n self.assertEqual(corr, None)\r\n self.assertEqual(num_errs, 4)", "def test_single_ended_trans_att_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n nx = 200\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n ts_ambient = np.ones(nt) * 12\n ts_valid = np.ones(nt) * 16\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask1 = np.logical_and(x > 0.125 * cable_len, x < 0.25 * cable_len)\n cold_mask2 = np.logical_and(x > 0.625 * cable_len, x < 0.75 * cable_len)\n warm_mask1 = np.logical_and(x > 0.75 * cable_len, x < 0.875 * cable_len)\n warm_mask2 = np.logical_and(x > 0.25 * cable_len, x < 0.375 * cable_len)\n valid_mask = np.logical_and(x > 0.40 * cable_len, x < 0.50 * cable_len)\n temp_real = np.ones((len(x), nt)) * 12 + 273.15\n temp_real[cold_mask1 + cold_mask2] = ts_cold + 273.15\n temp_real[warm_mask1 + warm_mask2] = ts_warm + 273.15\n temp_real[valid_mask] = ts_valid + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n # Add attenuation\n tr_att = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.4) :] *= tr_att\n tr_att2 = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.6) :] *= tr_att2\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n \"ambient\": ([\"time\"], ts_ambient),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"ambient\": [slice(0.52 * cable_len, 0.58 * cable_len)],\n \"cold\": [\n slice(0.125 * cable_len, 0.25 * cable_len),\n slice(0.65 * cable_len, 0.70 * cable_len),\n ],\n \"warm\": [slice(0.25 * cable_len, 0.375 * cable_len)],\n }\n\n ds_test = ds.copy(deep=True)\n\n # WLS\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n # test `trans_att` related functions\n # Clear out old results\n ds_test.set_trans_att([])\n\n assert ds_test.trans_att.size == 0, \"clear out 
trans_att config\"\n\n del_keys = []\n for k, v in ds_test.data_vars.items():\n if \"trans_att\" in v.dims:\n del_keys.append(k)\n\n assert len(del_keys) == 0, \"clear out trans_att config\"\n\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_gamma=(482.6, 0),\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing alpha + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_dalpha=(6.46e-05, 0),\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )", "def test_serialize_operator_needs_rotation(self, obs, expected):\n dev = QeQiskitDevice(wires=3, shots=1000, backend=\"qasm_simulator\", analytic=False)\n op_str = dev.serialize_operator(obs)\n assert op_str == expected", "def test_PredictionEnsemble_goddard(\r\n perfectModelEnsemble_initialized_control_1d_ym_cftime, smooth\r\n):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n assert pm.smooth(smooth)", "def test_htk(self):\n duration = 1\n num_channels = 1\n sample_rate = 8000\n path = self.get_temp_path(\"data.htk\")\n sox_utils.gen_audio_file(\n path, sample_rate=sample_rate, num_channels=num_channels, bit_depth=16, duration=duration\n )\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == 16\n assert info.encoding == \"PCM_S\"", "def test_get_G(self):\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)\n Nfreq = self.ds.Nfreqs\n multiplicative_tolerance = 1.\n key1 = (0, 24, 38)\n key2 = (1, 25, 38)\n\n for input_data_weight in ['identity','iC', 'dayenu']:\n self.ds.set_weighting(input_data_weight)\n if input_data_weight == 'dayenu':\n pytest.raises(ValueError,self.ds.R, key1)\n rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}\n self.ds.set_r_param(key1,rpk)\n self.ds.set_r_param(key2,rpk)\n for taper in taper_selection:\n self.ds.clear_cache()\n self.ds.set_taper(taper)\n #print 'input_data_weight', input_data_weight\n self.ds.set_Ndlys(Nfreq-2)\n G = self.ds.get_G(key1, key2)\n self.assertEqual(G.shape, 
(Nfreq-2, Nfreq-2)) # Test shape\n #print np.min(np.abs(G)), np.min(np.abs(np.linalg.eigvalsh(G)))\n matrix_scale = np.min(np.abs(np.linalg.eigvalsh(G)))\n\n if input_data_weight == 'identity':\n # In the identity case, there are three special properties\n # that are respected:\n # i) Symmetry: G_ab = G_ba\n # ii) Cylic property: G = (1/2) tr[R1 Q_a R2 Q_b]\n # = (1/2) tr[R2 Q_b R1 Q_a]\n # iii) All elements of G are positive.\n\n # Test symmetry\n anti_sym_norm = np.linalg.norm(G - G.T)\n self.assertLessEqual(anti_sym_norm,\n matrix_scale * multiplicative_tolerance)\n\n # Test cyclic property of trace, where key1 and key2 can be\n # swapped without changing the matrix. This is secretly the\n # same test as the symmetry test, but perhaps there are\n # creative ways to break the code to break one test but not\n # the other.\n G_swapped = self.ds.get_G(key2, key1)\n G_diff_norm = np.linalg.norm(G - G_swapped)\n self.assertLessEqual(G_diff_norm,\n matrix_scale * multiplicative_tolerance)\n min_diagonal = np.min(np.diagonal(G))\n\n # Test that all elements of G are positive up to numerical\n # noise with the threshold set to 10 orders of magnitude\n # down from the smallest value on the diagonal\n for i in range(Nfreq-2):\n for j in range(Nfreq-2):\n self.assertGreaterEqual(G[i,j],\n\n -min_diagonal * multiplicative_tolerance)\n else:\n # In general, when R_1 != R_2, there is a more restricted\n # symmetry where swapping R_1 and R_2 *and* taking the\n # transpose gives the same result\n #UPDATE: Taper now occurs after filter so this\n #symmetry only holds when taper = 'none'.\n if taper_selection == 'none':\n G_swapped = self.ds.get_G(key2, key1)\n G_diff_norm = np.linalg.norm(G - G_swapped.T)\n self.assertLessEqual(G_diff_norm,\n matrix_scale * multiplicative_tolerance)", "def test_a_variety_of_precisions(self):\n\n def generator():\n while True:\n trajectory = []\n curr_t = 1582482600\n for i in range(2, randint(4, 10)):\n lat, lon, curr_t = (\n uniform(-180.0, 180.0),\n uniform(-180.0, 180.0),\n curr_t + uniform(2, 60),\n )\n trajectory.append((lat, lon, curr_t))\n yield trajectory\n\n patience = 3 # seconds.\n waypoints, okays = 0, 0\n\n g = generator()\n start = time.time()\n while time.time() < start + patience:\n precision = randint(4, 7)\n wp = next(g)\n waypoints += len(wp)\n traj = trajectory.encode(wp, precision)\n wp2 = trajectory.decode(traj, precision)\n if wp == wp2:\n okays += len(wp2)\n else:\n for idx, _ in enumerate(wp):\n dx, dy, dt = (\n abs(wp[idx][0] - wp2[idx][0]),\n abs(wp[idx][1] - wp2[idx][1]),\n abs(wp[idx][2] - wp2[idx][2]),\n )\n if (\n dx > 10 ** -(precision - 1)\n or dy > 10 ** -(precision - 1)\n or dt > 10 ** -(precision - 1)\n ):\n print(\"idx={}, dx={}, dy={}, dt={}\".format(idx, dx, dy, dt))\n else:\n okays += 1\n\n self.assertEqual(okays, waypoints)\n print(\n \"encoded and decoded {0:.2f}% correctly for {1} waypoints @ {2} wp/sec\".format(\n 100 * okays / float(waypoints),\n waypoints,\n round(waypoints / patience, 0),\n )\n )", "def code() -> str:\n return \"\"\"\n G91 G17\n G0 Y10 X-10\n G0 Y0 X-5\n G0 Y5 X0\n G0 Y0 X5\n G0 Y0 X-5\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G0 Y-5 X0\n G0 Y-10 X10\n G0 Y0 X-5\n G0 Y-15 X-15\n G0 Y0 X5\n G0 Y5 X0\n G0 Y0 X-5\n G0 Y-5 X0\n G0 Y5 X0\n G2 Y5 X5 J0 I5\n G0 Y0 X5\n G0 Y-5 X0\n G2 Y-5 X-5 J0 I-5\n G0 Y5 X0\n G0 Y10 X10\n G0 Y0 X-30\n G3 Y0 X-10 J0 I-5\n G3 Y0 X10 J0 I5\n\n G0 Y0 X5\n G3 Y5 X5 J5 I0\n G3 Y10 X-10 J0 I-10\n G3 Y-5 X-5 J-5 I0\n G0 Y-5 X0\n\n G0 Y5 X0\n G3 Y5 X-5 J0 
I-5\n G3 Y-10 X-10 J-10 I0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n\n G0 Y0 X-5\n G3 Y-5 X-5 J-5 I0\n G3 Y-10 X10 J0 I10\n G3 Y5 X5 J5 I0\n G0 Y5 X0\n\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G3 Y10 X10 J10 I0\n G3 Y5 X-5 J0 I-5\n G0 Y0 X-5\n \"\"\"", "def testRoundTrip(self):\n with self.test_session():\n path = os.path.join(\n resource_loader.get_data_files_path(), 'testdata/mono_10khz.wav')\n with open(path, 'rb') as f:\n original_contents = f.read()\n\n audio_op = ffmpeg.decode_audio(\n original_contents, file_format='wav', samples_per_second=10000,\n channel_count=1)\n encode_op = ffmpeg.encode_audio(\n audio_op, file_format='wav', samples_per_second=10000)\n encoded_contents = encode_op.eval()\n self.assertEqual(original_contents, encoded_contents)", "def test_native_token_gas(self):\n qeth = token_id_encode(\"QETH\")\n id1 = Identity.create_random_identity()\n acc1 = Address.create_from_identity(id1, full_shard_key=0)\n acc2 = Address.create_random_account(full_shard_key=0)\n # Miner\n acc3 = Address.create_random_account(full_shard_key=0)\n\n env = get_test_env(\n genesis_account=acc1,\n genesis_minor_token_balances={\"QETH\": 10000000},\n charge_gas_reserve=True,\n )\n state = create_default_shard_state(env=env)\n\n tx = create_transfer_transaction(\n shard_state=state,\n key=id1.get_key(),\n from_address=acc1,\n to_address=acc2,\n value=12345,\n gas=21000,\n gas_token_id=qeth,\n transfer_token_id=qeth,\n )\n\n self.assertTrue(state.add_tx(tx))\n b1 = state.create_block_to_mine(address=acc3)\n self.assertEqual(len(b1.tx_list), 1)\n state.finalize_and_add_block(b1)\n self.assertEqual(state.header_tip, b1.header)\n self.assertEqual(\n state.get_token_balance(acc1.recipient, qeth),\n 10000000 - opcodes.GTXCOST - 12345,\n )\n self.assertEqual(state.get_token_balance(acc2.recipient, qeth), 12345)\n # after-tax coinbase + tx fee should only be in QKC\n self.assertEqual(\n state.get_token_balance(acc3.recipient, self.genesis_token),\n self.get_after_tax_reward(self.shard_coinbase + opcodes.GTXCOST),\n )\n tx_list, _ = state.db.get_transactions_by_address(acc1)\n self.assertEqual(tx_list[0].value, 12345)\n self.assertEqual(tx_list[0].gas_token_id, qeth)\n self.assertEqual(tx_list[0].transfer_token_id, qeth)\n tx_list, _ = state.db.get_transactions_by_address(acc2)\n self.assertEqual(tx_list[0].value, 12345)\n self.assertEqual(tx_list[0].gas_token_id, qeth)\n self.assertEqual(tx_list[0].transfer_token_id, qeth)", "def convertToSpectroGram(self):" ]
[ "0.53550947", "0.5301906", "0.5206506", "0.49940667", "0.49887952", "0.49855092", "0.49552405", "0.48806253", "0.4856835", "0.48504332", "0.48328564", "0.48092747", "0.48080942", "0.477451", "0.475015", "0.47448063", "0.47433698", "0.47394577", "0.47322595", "0.47050416", "0.4703761", "0.47010073", "0.47007525", "0.47005183", "0.4693379", "0.469055", "0.46904805", "0.46882266", "0.46831015", "0.46646288", "0.46573764", "0.46476763", "0.4645917", "0.46396536", "0.46324912", "0.46263897", "0.46226838", "0.4621413", "0.4620055", "0.4618119", "0.4613703", "0.46004543", "0.46001798", "0.45935088", "0.45904285", "0.4584725", "0.4584629", "0.4583062", "0.45829204", "0.4577148", "0.45770246", "0.45765918", "0.45761475", "0.4573668", "0.4549986", "0.4549961", "0.4538047", "0.45359674", "0.45350015", "0.45317006", "0.45295087", "0.4527372", "0.45244658", "0.45237893", "0.45231494", "0.45182967", "0.4512468", "0.45111865", "0.45042318", "0.45019886", "0.44988465", "0.44926906", "0.4492381", "0.44917166", "0.4489163", "0.4483901", "0.44819203", "0.44771028", "0.44768193", "0.4474461", "0.44731438", "0.44715077", "0.44699004", "0.4463879", "0.44629133", "0.44607523", "0.44606152", "0.44598222", "0.44558376", "0.4453376", "0.44511825", "0.44430044", "0.44397154", "0.4434172", "0.4431971", "0.4427519", "0.44252616", "0.44239146", "0.44165927", "0.4414181" ]
0.5863373
0
Create a CourseGraph, fetching unitary weights and edge weights from database, creating CourseNodes for each course, and adding weighted edges between the CourseNodes.
def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4):
    self._nodes = dict()  # dict with courseid keys, CourseNode vals
    self._max_suggestions = max_suggestions
    self._max_courses = max_courses
    self._cache_mult = cache_mult
    db = database

    # Get dict mapping courses to unitary weights
    unitary_dict = db.get_unitary_dict(session)

    # Get dict mapping courses to adjacent courses and weights
    edge_dict = db.get_edges_dict(session)

    # Create CourseNodes
    for courseid in unitary_dict:
        courseNode = CourseGraph.CourseNode(courseid=courseid, edges=dict(),
                                            popularity=unitary_dict[courseid])
        self._nodes[courseid] = courseNode

    # Create course edge dict for each CourseNode
    for courseid in edge_dict:
        node = self._nodes[courseid]  # get node of interest
        adj_courses = edge_dict[courseid]  # get inner dict {otherid: edge_weight}
        for otherid in adj_courses:
            other_node = self._nodes[otherid]
            node.addEdge(other_node, adj_courses[otherid])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()", "def prepare_graph(\n self,\n adjacency,\n weights,\n weighted=False,\n undirected=False,\n force_dense=True,\n noselfloop=True,\n verbose=True,\n ):\n\n # df_adj = pd.read_csv(in_folder + adj_name, index_col=0) # read adjacency file\n print(\"\\nAdjacency shape: {0}\".format(adjacency.shape), flush=True)\n\n # create the graph adding nodes and edges\n A = self.read_graph(\n adj=adjacency,\n weights=weights,\n weighted=weighted,\n undirected=undirected,\n noselfloop=noselfloop,\n verbose=verbose,\n )\n\n nodes = list(A[0].nodes)\n print(\"\\nNumber of nodes =\", len(nodes), flush=True)\n print(\"Number of layers =\", len(A), flush=True)\n if verbose:\n self.print_graph_stat(A)\n\n # save the multilayer network in a tensor with all layers\n if force_dense:\n B = self.build_B_from_A(A, nodes=nodes)\n else:\n B = self.build_sparse_B_from_A(A)\n\n return A, B, nodes", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def init_graph(self):\n import dgl\n\n adj_list = []\n for rel_type in range(1, self.n_relations, 1):\n edge_idxs = self.ckg.filter_edges(\n lambda edge: edge.data[\"relation_id\"] == rel_type\n )\n sub_graph = (\n dgl.edge_subgraph(self.ckg, edge_idxs, preserve_nodes=True)\n .adjacency_matrix(transpose=False, scipy_fmt=\"coo\")\n .astype(\"float\")\n )\n rowsum = np.array(sub_graph.sum(1))\n d_inv = np.power(rowsum, -1).flatten()\n d_inv[np.isinf(d_inv)] = 0.0\n d_mat_inv = sp.diags(d_inv)\n norm_adj = d_mat_inv.dot(sub_graph).tocoo()\n adj_list.append(norm_adj)\n\n final_adj_matrix = sum(adj_list).tocoo()\n indices = torch.LongTensor([final_adj_matrix.row, final_adj_matrix.col])\n values = torch.FloatTensor(final_adj_matrix.data)\n adj_matrix_tensor = torch.sparse.FloatTensor(indices, values, self.matrix_size)\n return adj_matrix_tensor.to(self.device)", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def __init__(self, nodes, edges, start_kind='Compound', end_kind='Disease', max_length=4, w=0.4, n_jobs=1):\n # Initialize important class variables\n self.w = w\n self.n_jobs = n_jobs\n self.metagraph = None\n self.start_kind = start_kind\n self.end_kind = end_kind\n\n # Placeholders for variables to be defined later\n self.node_file = None\n self.edge_file = None\n self.nodes = None\n self.metaedges = None\n self.adj_matrices = None\n self.out_degree = dict()\n self.in_degree = dict()\n self.degree_weighted_matrices = None\n\n # Mappers to be used later\n self.nid_to_index = None\n self.index_to_nid = None\n self.id_to_metanode = None\n self.metanode_to_ids = None\n self.nid_to_name = None\n self.metanode_to_edges = dict()\n self._modified_edges = None\n 
self._weighted_modified_edges = None\n self._orig_in_degree = dict()\n self._orig_out_degree = dict()\n\n # Read and/or store nodes as DataFrame\n if type(nodes) == str:\n self.node_file = nodes\n print('Reading file information...')\n self._read_node_file()\n elif type(nodes) == pd.DataFrame:\n self.node_df = gt.remove_colons(nodes).copy()\n self._validate_nodes()\n\n # Read and/or store edges as DataFrame\n if type(edges) == str:\n self.edge_file = edges\n self._read_edge_file()\n elif type(edges) == pd.DataFrame:\n self.edge_df = gt.remove_colons(edges).copy()\n self._validate_edges()\n\n # Process the Node and Edge information\n print('Processing node and edge data...')\n self._process_nodes()\n self._process_edges()\n\n # Initalize the metagraph and determine the metapaths available\n self._make_metagraph()\n self._determine_metapaths(start_kind, end_kind, max_length)\n self._map_metanodes_to_metaedges()\n\n # Generate the adjacency matrices.\n print('Generating adjacency matrices...')\n time.sleep(0.5)\n self._generate_adjacency_matrices()\n\n # Make Degree Weighted matrices.\n print('\\nDetermining degrees for each node and metaedge'.format(w))\n time.sleep(0.5)\n self._compute_node_degrees()\n\n # Make Degree Weighted matrices.\n print('\\nWeighting matrices by degree with dampening factor {}...'.format(w))\n time.sleep(0.5)\n self._generate_weighted_matrices()", "def neo4j_to_lkg():\n node_types = [\"judge\", \"keyword\", \"case\", \"catch\", \"act\", \"year\"]\n from backend.graph_formation.base.legal_knowledge_graph import LegalKnowledgeGraph\n\n lkg = LegalKnowledgeGraph()\n db = GraphDatabase(ENV[\"DB_URL\"], username=ENV[\"DB_USERNAME\"], password=ENV[\"DB_PASSWORD\"])\n # Authentication for NEO4J Browser\n\n for node_type in node_types:\n q = \"MATCH (c:{}) return c\".format(node_type) #Quering for all nodes in the graph\n results = db.query(q)\n for record in results:\n props={}\n node = record[0]\n if node:\n label = node[\"metadata\"][\"labels\"]\n node_id = node[\"data\"][\"id\"]\n node[\"data\"].pop(\"id\",None)\n props = node[\"data\"]\n props[\"type\"] = label\n lkg.add_node(id, **props)\n for node_type_1 in node_types:\n for node_type_2 in node_types:\n q = \"MATCH (c:{})-[r]->(m:{}) return c,m\".format(node_type_1, node_type_2) # Quering for all Relationships in the graph\n results = db.query(q)\n for record in results:\n node1 , node2 = record\n lkg.add_edge(node1[\"data\"][\"id\"], node2[\"data\"][\"id\"])\n return(lkg)", "def build_graph(self, name='', dump=None, nodes=None, depth_goal=1,\n filter_top=True, remove_isolates=True, add_years=True,\n fill_empty_years=True, model=None, dct=None,\n compute_core_periphery=True, compute_communities=True,\n compute_community_cores=True):\n self.graph = nx.DiGraph()\n self.graph.name = name\n if not dump:\n raise AttributeError('wiki.Net: Provide wiki.Dump object.')\n print('wiki.Net: traversing Wikipedia...')\n Net.bft(self.graph, dump, nodes, depth_goal=depth_goal, \n nodes=nodes, filter_top=filter_top)\n if remove_isolates:\n print('wiki.Net: removing isolates...')\n self.graph.remove_nodes_from(nx.isolates(self.graph))\n if add_years:\n print('wiki.Net: adding years...')\n for node in self.graph.nodes:\n dump.load_page(node)\n self.graph.nodes[node]['year'] = dump.years[0] if len(dump.years)>0 else []\n self.graph.graph['num_years'] = sum(\n [bool(y) for y in nx.get_node_attributes(self.graph, 'year').values()]\n )\n if fill_empty_years:\n print('wiki.Net: filling empty years...')\n nodes_filled = True\n while 
nodes_filled:\n nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=True)\n nodes_filled = True\n while nodes_filled:\n nodes_filled = Net.fill_empty_nodes(self.graph, full_parents=False)\n for node in self.graph.nodes:\n if not self.graph.nodes[node]['year']:\n self.graph.nodes[node]['year'] = Net.MAX_YEAR\n if model and dct:\n print('wiki.Net: calculating weights...')\n self.graph.graph['tfidf'] = Net.compute_tfidf(self.nodes, dump, model, dct)\n Net.set_weights(self.graph)\n if compute_core_periphery:\n print('wiki.Net: computing core-periphery...')\n Net.assign_core_periphery(self.graph)\n if compute_communities:\n print('wiki.Net: computing communities...')\n Net.assign_communities(self.graph)\n if compute_community_cores:\n print('wiki.Net: computing cores within communities...')\n Net.assign_cores_to_communities(self.graph)", "def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def create_graph(self, lat, lon):\n # Open connection to the database (nodes)\n cur = armaps.model.get_db()\n\n # Get the waypoints\n cur.execute(\n \"SELECT * FROM waypoints WHERE venue_id = %s\", \n (self.venue_id,)\n )\n waypoints = cur.fetchall()\n\n # Get the paths (edges)\n cur.execute(\n \"SELECT * FROM paths WHERE venue_id = %s\",\n (self.venue_id,)\n )\n paths = cur.fetchall()\n\n # Transform list of waypoints into dictionary with key = waypoint_id\n for waypoint in waypoints:\n self.waypoints[int(waypoint[\"waypoint_id\"])] = {\n \"lat\": float(waypoint[\"latitude\"]),\n \"lon\": float(waypoint[\"longitude\"]),\n \"waypoint_id\": int(waypoint[\"waypoint_id\"])\n }\n\n # Calculate weights of edges in graph\n for path in paths:\n # Get two nodes (waypoints) associated with edge\n inNode = int(path[\"innode\"])\n outNode = int(path[\"outnode\"])\n\n # Get the coordinates of nodes\n inNode_coords = (self.waypoints[inNode][\"lat\"], self.waypoints[inNode][\"lon\"])\n outNode_coords = (self.waypoints[outNode][\"lat\"], self.waypoints[outNode][\"lon\"])\n distance = geopy.distance.distance(inNode_coords, outNode_coords).miles\n\n # Add to graph (both ways for undirected)\n self.graph.add_edge(inNode, outNode, distance)\n self.graph.add_edge(outNode, inNode, distance)", "def populate_graph(self):", "def gen_graph(self):", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = 
PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)", "def create_wiki_graph(self):\n\n print 'Creating wiki corpus graph representation'\n\n for path, subdirs, files in os.walk(self.wk_path):\n\n here = os.path.split(path)[1]\n parent = os.path.split(os.path.split(path)[0])[1]\n\n self.categories.add_edge(parent, here)\n\n self.categories[parent][\"path\"] = path\n self.categories[here][\"path\"] = path\n\n for name in files:\n if fnmatch(name, \"*.yaml\") and \"Index\" not in name and \"index\" not in name: # check if there is a text file\n \n category_name = name[0:-5]\n yaml_file_path = os.path.join(\n path, category_name + \".yaml\")\n\n # yaml\n yaml_file = open(yaml_file_path, \"r\")\n docs = yaml.load_all(yaml_file)\n\n # category_name\n for doc in docs:\n cat_parent = doc[\"CategoryPath\"][0]\n\n self.categories.add_edge(\n slugify(cat_parent), slugify(category_name))\n self.categories[slugify(cat_parent)][\"path\"] = path\n self.categories[slugify(category_name)][\"path\"] = path\n\n for cat in doc[\"Categories\"][0][self.language]:\n self.categories.add_edge(\n slugify(category_name), slugify(cat))\n self.categories[slugify(cat)][\"path\"] = path\n\n print(\"The categories graph %s has %d nodes with %d edges\"\n % (self.categories.name,\n nx.number_of_nodes(self.categories),\n nx.number_of_edges(self.categories)))\n for node in nx.nodes(self.categories):\n self.get_corpus_from_node(node)\n\n pickle.dump(self.categories, open(self.graph_path, 'w'))\n\n print \"Graph saved as %s\"%(self.graph_path)", "def generate_graph(self):\n glw = GraphLineWeights()\n\n node_id = 0\n last_key = list(self.storage.keys())[-1]\n\n for key in tqdm.tqdm(self.storage):\n for key_line in self.storage[key]:\n for node in self.storage[key][key_line]:\n # set unique node id and calculate centroid\n node.id = node_id\n node.center_x = node.left + int(node.width / 2)\n node.center_y = node.top + int(node.height / 2)\n node_id += 1\n for key in self.storage:\n for key_line in self.storage[key]:\n for node_with_id in self.storage[key][key_line]:\n # print(node_with_id.word)\n # print(node_with_id.left, node_with_id.top, node_with_id.width, node_with_id.height)\n # consider 4 sides: top, right, bottom, left\n # glw: 0 -> 1 -> 2 -> 3\n # 1. top, verified\n min_dist = self.get_top_node(node_with_id, key - 1, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 0, node_with_id.top_node_id, min_dist)\n # 2. bottom\n min_dist = self.get_bottom_node(node_with_id, key + 1, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 2, node_with_id.bottom_node_id, min_dist)\n # 3. 
left\n min_dist = self.get_left_node(node_with_id, key, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 3, node_with_id.left_node_id, min_dist)\n # 4. right\n min_dist = self.get_right_node(node_with_id, key, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 1, node_with_id.right_node_id, min_dist)\n\n return glw", "def generate_courses():\r\n for category in CourseCategory.objects.all():\r\n Course.objects.create(name=category.name, category=category, is_active=True,\r\n is_featured=True)", "def _build_graph(self):\n pass", "def _construct_graph(self):\n raise NotImplementedError", "def __init__(self, nodes, edges, weights='weight', start_kind='Compound', end_kind='Disease',\n scale_weights=True, max_length=4, w=0.4, n_jobs=1):\n\n super().__init__(nodes, edges, start_kind, end_kind, max_length, w, n_jobs)\n\n # Validate the weights\n if isinstance(weights, str):\n # Make sure that the weights is in the column\n assert weights in self.edge_df.columns\n # Ensure that weights are numberic\n assert np.issubdtype(self.edge_df[weights].dtype, np.number)\n # Store the column name\n self.weights = weights\n\n elif isinstance(weights, collections.Iterable):\n # Ensure that there's a weight for every edge\n assert len(weights) == len(self.edge_df)\n # Make sure the weights are numbers\n assert all(isinstance(w, (int, float)) for w in weights)\n # Store the weights and columname\n self.edge_df['weight'] = weights\n self.weights = 'weight'\n self.scale_weights = scale_weights\n if self.scale_weights:\n self.orig_weights = self.weights\n self._scale_weights_to_degree()\n self._scaling_skipped = False\n\n # Make special matrices required for weighted calculations\n self._generate_weighted_adj_matrices()\n self._degree_weight_weighted_matrices()\n self._modified_weighted_adj_matrices = None", "def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0", "def build_graph(self):\n pass", "def make_graph(self):\n\n # the root node\n self.graph.node(self.playbook_filename, style=\"dotted\", id=\"root_node\")\n\n # loop through the plays\n for play_counter, play in enumerate(self.playbook.get_plays(), 1):\n\n # the load basedir is relative to the playbook path\n if play._included_path is not None:\n self.data_loader.set_basedir(play._included_path)\n else:\n self.data_loader.set_basedir(self.playbook._basedir)\n self.display.vvv(\"Loader basedir set to {}\".format(self.data_loader.get_basedir()))\n\n play_vars = self.variable_manager.get_vars(play)\n play_hosts = [h.get_name() for h in self.inventory_manager.get_hosts(self.template(play.hosts, play_vars))]\n play_name = \"Play #{}: {} ({})\".format(play_counter, clean_name(play.get_name()), len(play_hosts))\n play_name = self.template(play_name, play_vars)\n\n self.display.banner(\"Graphing \" + play_name)\n\n play_id = \"play_\" + str(uuid.uuid4())\n\n self.graph_representation.add_node(play_id)\n\n with self.graph.subgraph(name=play_name) as play_subgraph:\n color, play_font_color = get_play_colors(play)\n # play node\n play_subgraph.node(play_name, id=play_id, style=\"filled\", shape=\"box\", color=color,\n fontcolor=play_font_color, tooltip=\" \".join(play_hosts))\n\n # edge from root node to plays\n play_edge_id = \"edge_\" + 
str(uuid.uuid4())\n play_subgraph.edge(self.playbook_filename, play_name, id=play_edge_id, style=\"bold\",\n label=str(play_counter), color=color, fontcolor=color)\n\n # loop through the pre_tasks\n self.display.v(\"Graphing pre_tasks...\")\n nb_pre_tasks = 0\n for pre_task_block in play.pre_tasks:\n nb_pre_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=pre_task_block, color=color,\n current_counter=nb_pre_tasks, play_vars=play_vars,\n node_name_prefix=\"[pre_task] \")\n\n # loop through the roles\n self.display.v(\"Graphing roles...\")\n role_number = 0\n for role in play.get_roles():\n # Don't insert tasks from ``import/include_role``, preventing duplicate graphing\n if role.from_include:\n continue\n\n # the role object doesn't inherit the tags from the play. So we add it manually.\n role.tags = role.tags + play.tags\n if not role.evaluate_tags(only_tags=self.options.tags, skip_tags=self.options.skip_tags,\n all_vars=play_vars):\n self.display.vv(\"The role '{}' is skipped due to the tags.\".format(role.get_name()))\n # Go to the next role\n continue\n\n role_number += 1\n role_name = \"[role] \" + clean_name(role.get_name())\n\n with self.graph.subgraph(name=role_name, node_attr={}) as role_subgraph:\n current_counter = role_number + nb_pre_tasks\n role_id = \"role_\" + str(uuid.uuid4())\n edge_id = \"edge_\" + str(uuid.uuid4())\n\n role_subgraph.node(role_name, id=role_id)\n # edge from play to role\n role_subgraph.edge(play_name, role_name, label=str(current_counter), color=color,\n fontcolor=color, id=edge_id)\n\n self.graph_representation.add_link(play_id, edge_id)\n self.graph_representation.add_link(edge_id, role_id)\n\n # loop through the tasks of the roles\n if self.options.include_role_tasks:\n role_tasks_counter = 0\n for block in role.compile(play):\n role_tasks_counter = self._include_tasks_in_blocks(current_play=play,\n graph=role_subgraph,\n parent_node_name=role_name,\n parent_node_id=role_id, block=block,\n color=color, play_vars=play_vars,\n current_counter=role_tasks_counter,\n node_name_prefix=\"[task] \")\n role_tasks_counter += 1\n self.display.v(\"{} roles added to the graph\".format(role_number))\n\n # loop through the tasks\n self.display.v(\"Graphing tasks...\")\n nb_tasks = 0\n for task_block in play.tasks:\n nb_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=task_block, color=color,\n current_counter=role_number + nb_pre_tasks,\n play_vars=play_vars, node_name_prefix=\"[task] \")\n\n # loop through the post_tasks\n self.display.v(\"Graphing post_tasks...\")\n for post_task_block in play.post_tasks:\n self._include_tasks_in_blocks(current_play=play, graph=play_subgraph, parent_node_name=play_name,\n parent_node_id=play_id, block=post_task_block, color=color,\n current_counter=nb_tasks, play_vars=play_vars,\n node_name_prefix=\"[post_task] \")\n\n self.display.banner(\"Done graphing {}\".format(play_name))\n self.display.display(\"\") # just an empty line\n # moving to the next play", "def create_nodes(nd=None):\n\n if not nd:\n raise ValueError(\"No nodes data provided.\")\n\n nodes = []\n\n # Create Bus objects from buses table\n busd = {}\n\n for i, b in nd[\"buses\"].iterrows():\n if b[\"active\"]:\n bus = solph.Bus(label=b[\"label\"])\n nodes.append(bus)\n\n busd[b[\"label\"]] = bus\n if b[\"excess\"]:\n nodes.append(\n solph.Sink(\n label=b[\"label\"] + \"_excess\",\n 
inputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"excess costs\"]\n )\n },\n )\n )\n if b[\"shortage\"]:\n nodes.append(\n solph.Source(\n label=b[\"label\"] + \"_shortage\",\n outputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"shortage costs\"]\n )\n },\n )\n )\n\n # Create Source objects from table 'commodity sources'\n for i, cs in nd[\"commodity_sources\"].iterrows():\n if cs[\"active\"]:\n nodes.append(\n solph.Source(\n label=cs[\"label\"],\n outputs={\n busd[cs[\"to\"]]: solph.Flow(\n variable_costs=cs[\"variable costs\"]\n )\n },\n )\n )\n\n # Create Source objects with fixed time series from 'renewables' table\n for i, re in nd[\"renewables\"].iterrows():\n if re[\"active\"]:\n # set static outflow values\n outflow_args = {\n \"nominal_value\": re[\"capacity\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == re[\"label\"]:\n outflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Source(\n label=re[\"label\"],\n outputs={\n busd[re[\"to\"]]: solph.Flow(**outflow_args)\n },\n )\n )\n\n # Create Sink objects with fixed time series from 'demand' table\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de['active']):\n # set static inflow values\n inflow_args = {\n \"nominal_value\": de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == de[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]: solph.Flow(**inflow_args)\n },\n )\n )\n\n # Create Transformer objects from 'transformers' table\n for i, t in nd[\"transformers\"].iterrows():\n if t[\"active\"]:\n # set static inflow values\n inflow_args = {\"variable_costs\": t[\"variable input costs\"]}\n # get time series for inflow of transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == t[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n # create\n nodes.append(\n solph.Transformer(\n label=t[\"label\"],\n inputs={busd[t[\"from\"]]: solph.Flow(**inflow_args)},\n outputs={\n busd[t[\"to\"]]: solph.Flow(nominal_value=t[\"capacity\"])\n },\n conversion_factors={busd[t[\"to\"]]: t[\"efficiency\"]},\n )\n )\n\n for i, s in nd[\"storages\"].iterrows():\n if s[\"active\"]:\n nodes.append(\n solph.components.GenericStorage(\n label=s[\"label\"],\n inputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity inflow\"],\n variable_costs=s[\"variable input costs\"],\n )\n },\n outputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity outflow\"],\n variable_costs=s[\"variable output costs\"],\n )\n },\n nominal_storage_capacity=s[\"nominal capacity\"],\n loss_rate=s[\"capacity loss\"],\n initial_storage_level=s[\"initial capacity\"],\n max_storage_level=s[\"capacity max\"],\n min_storage_level=s[\"capacity min\"],\n inflow_conversion_factor=s[\"efficiency inflow\"],\n outflow_conversion_factor=s[\"efficiency outflow\"],\n )\n )\n\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"]:\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label=\"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs={bus1: solph.Flow(), bus2: solph.Flow()},\n outputs={\n bus1: solph.Flow(nominal_value=p[\"capacity\"]),\n bus2: 
solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1, bus2): p[\"efficiency\"],\n (bus2, bus1): p[\"efficiency\"],\n },\n )\n )\n\n return nodes", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def generate_model(self):\n rootpath = 'c:\\\\Users\\\\Gamelab\\\\Desktop\\\\RT\\\\Others\\\\Thesis\\\\Thesis_coding\\\\ABM\\\\' \n \n df = pd.read_csv(rootpath+'data\\\\subset_initialized_latlonvalues.csv')\n df = df.drop(columns='Unnamed: 0')\n households_in_block = {}\n household_ids_in_block = {}\n # holds all the graphs indexed by blockid [geoid]\n \n def add_and_remove_edges(G, p_new_connection, p_remove_connection): \n\n new_edges = [] \n rem_edges = [] \n for node in G.nodes(): \n # find the other nodes this one is connected to \n connected = [to for (fr, to) in G.edges(node)] \n # and find the remainder of nodes, which are candidates for new edges \n unconnected = [n for n in G.nodes() if not n in connected] \n\n # probabilistically add a random edge \n if len(unconnected): # only try if new edge is possible \n if random.random() < p_new_connection: \n new = random.choice(unconnected) \n G.add_edge(node, new) \n #print(\"\\tnew edge:\\t {} -- {}\".format(node, new) \n new_edges.append( (node, new) ) \n # book-keeping, in case both add and remove done in same cycle \n unconnected.remove(new) \n connected.append(new) \n\n # probabilistically remove a random edge \n if len(connected): # only try if an edge exists to remove \n if random.random() < p_remove_connection: \n remove = random.choice(connected) \n G.remove_edge(node, remove) \n #print \"\\tedge removed:\\t {} -- {}\".format(node, remove) \n rem_edges.append( (node, remove) ) \n # book-keeping, in case lists are important later? 
\n connected.remove(remove) \n unconnected.append(remove) \n return rem_edges, new_edges\n\n\n\n\n #now i need to get number of geoids unique \n for block in df['geoid'].unique(): \n G_temp=nx.Graph()\n households_in_block[block] = df[df['geoid']==block] # contains all the information about the households \n household_ids_in_block[block] = df[df['geoid']==block]['CASE_ID'].values \n # contains only their ID\n # you only need id to initialize a node\n tempdf = households_in_block[block]\n for household in household_ids_in_block[block]:\n lon = tempdf.loc[tempdf['CASE_ID']==household,'lon'].values[0]\n lat = tempdf.loc[tempdf['CASE_ID']==household,'lat'].values[0] \n \n G_temp.add_node(str(household), pos=(lon,lat))\n self.G.add_node(str(household), pos=(lon,lat))\n \n ## add G to the dictionary\n self.graph_dict[block] = G_temp\n \n \n rem_edges, new_edges = add_and_remove_edges(self.G, 0.5, 0.5)\n self.G.remove_edges_from(rem_edges)\n self.G.add_edges_from(new_edges)\n\n \n\n self.grid= NetworkGrid(self.G)\n \n for _, row in df.iterrows(): # index, row in ...\n \n agent = Household(unique_id = str(row['CASE_ID']),\n model = self, \n income = row['income'],\n age= row['age'],\n size= row['household_'],\n ami_category = row['ami_categ'],\n elec_consumption= row['elec_consumption'],\n attitude = row['attitude'],\n pbc = row['pbc'],\n subnorms = row['subnorms'],\n geoid = row['geoid'],\n tract = row['tract'],\n bgid = row['bgid'],\n adoption_status = 0)\n \n \n\n if agent:\n self.schedule.add(agent)\n y = row['lat']\n x = row['lon']\n self.grid.place_agent(agent, node_id=agent.unique_id)\n #self.space.place_agent(agent, (x, y))\n #agent.pos = (x, y)", "def __create_graph(self):\n # create the nodes\n for h in range(self.height):\n row: List[JuncNode] = list()\n for w in range(self.width):\n jnodes: List[Node] = [self.add_node() for _ in range(4)]\n jn = JuncNode(jnodes, (h, w))\n row.append(jn)\n self.__juncs.append(row)\n # create all connections\n self.__create_connections()", "def createOptimizedGraph(routes):\n\n\tgraphClass = Graph(routes, directed=True)\n\n\treturn graphClass.getGraph()", "def iGraphFromTuples(association_tuples):\n \n# #get unique words\n# vocab = set()\n# uppercase_tuples = []\n# for (s,r), stren in association_tuples:\n# uppercase_tuples.append((s.upper(), r.upper(), stren))\n# vocab.update(word_pair)\n \n# vocab = list(vocab) #convert to ordered list\n# \n# \n# graph = Graph(len(vocab), directed=True)\n# graph.vs[\"name\"] = vocab #set vertex names\n# edges, _ = zip(*association_tuples)\n# graph.add_edges(edges)\n #association_tuples = [(s.upper(),r.upper(),stren) for (s,r), stren in association_tuples]\n association_tuples = [(s,r,stren) for (s,r), stren in association_tuples]\n graph = Graph.TupleList(association_tuples, directed=True, weights=True)\n \n graph.vs[\"id\"] = graph.vs[\"name\"]\n \n #add weights\n# for s, r , stren in association_tuples:\n# graph[(s,r)] = stren\n neg_log_proportions = []\n for e in graph.es:\n neg_log_proportions.append(-log10(e[\"weight\"]))\n \n graph.es[\"-log weight\"] = neg_log_proportions\n \n assoc_object = AssociationIGraph()\n assoc_object.graph = graph\n return assoc_object", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n 
graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def generate_graph(number_of_nodes):\n cities = []\n size = int(math.sqrt(number_of_nodes))\n if size*size != number_of_nodes:\n raise ArgumentError(\"At the moment generate_graph() only takes perfect squares (3, 16, 25 etc.). Feel free to improve it.\")\n test = 0\n for position in range(0, number_of_nodes):\n city = City()\n city.x_position = (position) % size\n city.y_position = int(position / size)\n cities.append(city)\n\n for i_city in range(0, len(cities)):\n city = cities[i_city]\n x_pos = city.x_position\n y_pos = city.y_position\n\n if x_pos != 0:\n city.adjacent_cities.append(cities[i_city - 1])\n\n if x_pos != size-1:\n city.adjacent_cities.append(cities[i_city + 1])\n\n if y_pos != 0:\n city.adjacent_cities.append(cities[i_city - size])\n\n if y_pos != size-1:\n city.adjacent_cities.append(cities[i_city + size])\n\n return cities", "def buildGraph(self):\r\n\r\n print 'Building graph...'\r\n\r\n self.buildQ()\r\n self.buildP()\r\n self.buildReconstructionTerm()\r\n self.buildConditionalPriorTerm()\r\n self.buildWPriorTerm()\r\n self.buildZPriorTerm()\r\n\r\n self.buildObjective()\r\n self.buildGrad()", "def create_weighted_bipartite_graph(G,d):\n\n\tfor k in d.keys():\n\t\tfor v in d[k]:\n\t\t\tG.add_node(v[0],bipartite='code')\n\t\t\tG.add_edge(k,v[0],weight=v[1])\n\n\treturn G", "def __init__(self, graphs: List[Graph], graph_ids: Set[str]) -> None:\n self.graph_ids = graph_ids\n\n # count of link given source & object\n self.c_l_given_so: Dict[Tuple[bytes, bytes], Dict[bytes, int]] = {}\n # count of nodes\n self.c_n: Dict[bytes, int] = {}\n # count of link given source\n self.c_l_given_s: Dict[bytes, Dict[bytes, int]] = {}\n\n # COMPUTE counting\n for g in graphs:\n for link in g.iter_links():\n s = link.get_source_node().label\n o = link.get_target_node().label\n\n # COMPUTE c_l_given_s\n if s not in self.c_l_given_s:\n self.c_l_given_s[s] = {}\n if link.label not in self.c_l_given_s[s]:\n self.c_l_given_s[s][link.label] = 0\n self.c_l_given_s[s][link.label] += 1\n\n # COMPUTE c_l_given_so\n if link.get_target_node().is_data_node():\n # no need to estimate this prob, since it will be result from semantic labeling\n pass\n else:\n if (s, o) not in self.c_l_given_so:\n self.c_l_given_so[(s, o)] = {}\n if link.label not in self.c_l_given_so[(s, o)]:\n self.c_l_given_so[(s, o)][link.label] = 0\n self.c_l_given_so[(s, o)][link.label] += 1\n\n # COMPUTE c_n\n for n in g.iter_nodes():\n if n.label not in self.c_n:\n self.c_n[n.label] = 0\n self.c_n[n.label] += 1\n\n # cached\n self.p_critical_l_given_s = {}\n for s, counts in self.c_l_given_s.items():\n l, c_l = max(counts.items(), key=lambda x: x[1])\n self.p_critical_l_given_s[s] = (l, c_l / self.c_n[s])", "def prepare_graph(label, nodes, edges, graphID):\n features = {'label': label}\n\n G = nx.DiGraph()\n nodes[\"id\"] = nodes[\"id\"].apply(lambda x : str(x))\n features['num_nodes'] = nodes.shape[0]\n op_node = None\n times = []\n friends = []\n followers = []\n for 
index, row in nodes.iterrows():\n G.add_node(row['id'], time=row['time'], friends=row['friends'], followers = row['followers'])\n times.append(row['time'])\n friends.append(2**row['friends'])\n followers.append(2**row['followers'])\n if row['time'] == 0:\n features['poster_friend_cnt'] = 2**row['friends']\n features['poster_follower_cnt'] = 2**row['followers']\n tweeter_id = row['id']\n op_node = row['id']\n features['avg_time'] = np.mean(times)\n features['avg_friends'] = np.mean(friends)\n features['avg_followers'] = np.mean(followers)\n features['max_followers'] = max(followers)\n features['max_friends'] = max(friends)\n features['friends_25th_percentile'] = np.percentile(friends, 25)\n features['friends_75th_percentile'] = np.percentile(friends, 75)\n features['followers_25th_percentile'] = np.percentile(followers, 25)\n features['followers_75th_percentile'] = np.percentile(followers, 75)\n node_list = []\n edge_count = 0\n for pair in edges:\n node1, node2 = pair.split()[0], pair.split()[1]\n node_list.append(node1)\n node_list.append(node2)\n G.add_edge(node1, node2)\n edge_count += 1\n features['num_edges'] = edge_count\n sum_users_without_followers = sum([1 for (node, val) in G.in_degree() if val==0])\n features['ratio_users_w/out_followers'] = sum_users_without_followers / len(G.nodes)\n features['num_connected_components'] = nx.number_strongly_connected_components(G)\n features['number_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id)\n features['percentage_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id) / features['poster_follower_cnt']\n features['avg_clustering'] = nx.average_clustering(G)\n features['op_clustering'] = nx.clustering(G,op_node)\n features['transitivity'] = nx.transitivity(G)\n node_list = list(set(node_list))\n features['nodeID_list'] = np.array(node_list)\n features['graph_id'] = graphID\n return features, G", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). 
Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict", "def gen_nodes(modelfile, starting_genes):\n # read json file with final model variables\n shape, top_genes, weights, output_key, biases = read_json(modelfile)\n\n # initialize database\n database = db.Database()\n\n # create list to store all layers\n NN = []\n\n # get input probe sequences\n input_seqs_df = inputs.probes_df(top_genes)\n # each layer is a dictionary with keys as names of strands and values as a list of seqs\n l_0 = {}\n probe_seqs = []\n for probe in input_seqs_df[\"Probe Sequences\"]:\n index = 0\n size = database.size\n while database.size < size + 1:\n try:\n database.database_insert(Seq(probe[index]))\n index += 1\n # except block handles case that NONE of the probe sequences were accepted into the database\n # ***TEMPORARY FIX***\n except IndexError:\n index -= 1\n break\n probe_seqs.append(Seq(probe[index]))\n l_0[\"Probe Sequence\"] = probe_seqs\n print(\"Layer 0: \", l_0)\n NN.append(l_0)\n\n # add the tether and promotor to the database\n database.database_insert(starting_genes[\"Tether\"])\n database.database_insert(starting_genes[\"T7 Promoter\"])\n\n # generate all the sequences for every node in each layer\n for layer in range(1, len(shape)):\n # add the cage and tether sequences to the layer dictionary\n l_i = {}\n l_i[\"Cage Sense\"] = [starting_genes[\"Cage Sense\"]] * shape[layer]\n l_i[\"Cage Antisense\"] = [starting_genes[\"Cage Antisense\"]] * shape[layer]\n l_i[\"Tether\"] = [starting_genes[\"Tether\"]] * shape[layer]\n\n print(\"getting anchor strands\")\n tether_length = len(starting_genes[\"Tether\"])\n size = database.size\n # generate anchor strands until all of them have been accepted into the database\n while database.size < size + shape[layer]:\n anchor = oligo.oligo(tether_length)\n database.database_insert(anchor)\n anchor_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n print(\"getting transcription factors\")\n threshold_energy = 9 # variable that can be changed, pos integer, see gen_tf for description\n static_tf_seqs = []\n tf_seqs = []\n for anchor in anchor_seqs:\n static_tf, tf = gen_tf(anchor, starting_genes[\"Tether\"], threshold_energy)\n static_tf_seqs.append(static_tf)\n tf_seqs.append(tf)\n print(\"DONE\")\n\n print(\"getting outputs\")\n output_length = 25 # length of dna transcript from one node\n size = database.size\n while database.size < size + shape[layer]:\n output = 
oligo.oligo(output_length).sequence\n database.database_insert(output)\n transcript_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n # assemble longer strands in the node\n l_i[\"Static TF + Transcript Sense\"] = [static_tf_seqs[i] + starting_genes[\"T7 Promoter\"] + transcript_seqs[i]\n for i in range(shape[layer])]\n l_i[\"Transcript Antisense + Anchor\"] = [\n oligo.complement(transcript_seqs[i]) + oligo.complement(starting_genes[\"T7 Promoter\"]) + anchor_seqs[i] for\n i in range(shape[layer])]\n\n # intermediates are the strands that determine weights in toehold-mediated displacement\n print(\"getting intermediate\")\n toe_length = 20 # standard length for all toehold sequences\n # get the 2D matrix for this layer and round the values to one decimal place\n weight_matrix = np.array(weights[layer - 1])\n weight_matrix = np.round(weight_matrix, 1)\n intermediate_seqs = []\n tf_appendage_seqs = []\n for i in range(shape[layer - 1]):\n if layer == 1:\n output = NN[0][\"Probe Sequence\"][i]\n else:\n output = NN[layer - 1][\"Static TF + Transcript Sense\"][i][-output_length:]\n inters = []\n top_toe = output[:toe_length]\n b_dom = output[toe_length:]\n tf_appendage_seqs.append(b_dom)\n # get all the possible sequences for toehold weights between 0 and 1\n weight_dict = quant.find_quanta(top_toe)\n for j in range(shape[layer]):\n w = weight_matrix[j, i]\n tf = tf_seqs[j]\n a_star_tf = tf[:len(tf) // 2]\n if w < 0:\n # negative weights\n inters.append(a_star_tf + oligo.complement(b_dom) + weight_dict[w * -1])\n else:\n # positive weights\n inters.append(oligo.complement(a_star_tf) + oligo.complement(b_dom) + weight_dict[w])\n\n intermediate_seqs.append(inters)\n # each list in the nested list is for one node in the layer, get nodes row-wise\n l_i[\"Intermediate\"] = np.array(intermediate_seqs).T.tolist()\n print(\"DONE\")\n\n # TF and TF Inhibitor are products of toehold-mediated displacement for pos and neg weights, respectively\n full_tf_seqs_2D = []\n attack_seqs_2D = []\n for tf in tf_seqs:\n full_tf_seqs = []\n attack_seqs = []\n for appendage in tf_appendage_seqs:\n full_tf_seq = appendage + tf\n attack_seq = appendage + oligo.complement(tf[:len(tf) // 2])\n full_tf_seqs.append(full_tf_seq)\n attack_seqs.append(attack_seq)\n full_tf_seqs_2D.append(full_tf_seqs)\n attack_seqs_2D.append(attack_seqs)\n l_i[\"TF\"] = full_tf_seqs_2D\n l_i[\"TF Inhibitor\"] = attack_seqs_2D\n\n print(\"Layer {}: \".format(layer), l_i)\n # add the completed layer to the NN list\n NN.append(l_i)\n\n return NN", "def build_graph(nodes):\n\n job_instances_map = {}\n\n # first create node structure\n nodes_map = {}\n root_nodes = []\n for node in nodes:\n new_node = JobGraphNode(node, job_instances_map)\n nodes_map[node.id] = new_node\n # check if it is root node\n try:\n node.relationships.next()\n except StopIteration:\n root_nodes.append(new_node)\n\n # then set relationships\n for _, child in nodes_map.iteritems():\n for relationship in child.cfy_node.relationships:\n parent = nodes_map[relationship.target_node.id]\n parent.add_child(child)\n child.add_parent(parent)\n\n return root_nodes, job_instances_map", "def create_initial_graph(self):\n # Initialise weights\n for link in self.gene_links:\n link.weight = random.uniform(weight_init_min, weight_init_max)\n # Initialise biases\n for node in self.gene_nodes:\n node.bias = random.uniform(bias_init_min, bias_init_max)\n if node.can_modify:\n node.act_func = self.act_set.get_random_activation_func()\n if node.act_func in 
[activations.gaussian, activations.sin]:\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)", "def build_graph(self):\n for node in self.nodes:\n self.graph.add_node(node.id, node_obj=node)\n edges = []\n for i in range(0, len(self.nodes)):\n for j in range(i+1, len(self.nodes)):\n if (self.nodes[i].distance(self.nodes[j]) < self.radio_range):\n edges.append((self.nodes[i].id, self.nodes[j].id,1))\n self.graph.add_weighted_edges_from(edges)", "def build_graph(self):\n raise NotImplementedError", "def __initialize_connection_strengths(G):\n G_prime = G.__deepcopy__() # construct a deepcopy of the graph\n # for every vertex in the graph, initialize the connection strength to zero\n for node in G_prime.get_nodeset(): node.add_attribute(StoerWagner.CONNECTION_STRENGTH_ATTRIBUTE, float(0))\n return G_prime # return the new graph", "def build(self):\n self.logger.info('Rebuilding adjacency information')\n self.edges = collections.defaultdict(list)\n\n topic_to_publisher = collections.defaultdict(list)\n topic_to_subscribers = collections.defaultdict(list)\n node_to_missing_deps = collections.defaultdict(list)\n\n result = True\n\n for node in self.nodes.values():\n for topic in node.provided_topics.keys():\n topic_to_publisher[topic].append(node)\n\n for topic in node.required_topics:\n topic_to_subscribers[topic].append(node)\n\n for dep in node.additional_dependencies:\n if dep not in self.nodes:\n node_to_missing_deps[node].append(dep)\n\n if len(node_to_missing_deps) > 0:\n result = False\n msg = io.StringIO()\n print('Found [{}] managed processes with missing dependencies'.format(len(node_to_missing_deps)), file=msg)\n fmt = ' Managed process [{}] is missing [{}]'\n\n for (node, missing) in node_to_missing_deps.items():\n print(fmt.format(node.name, ', '.join(missing)), file=msg)\n self.logger.error(msg.getvalue())\n\n missing_publishers = []\n for topic in topic_to_subscribers.keys():\n if topic not in topic_to_publisher:\n missing_publishers.append(topic)\n\n if len(missing_publishers) > 0:\n result = False\n msg = io.StringIO()\n print('Found [{}] topics that do not have publishers'.format(len(missing_publishers)), file=msg)\n fmt = ' Topic [{}] with subscribers [{}]'\n\n for topic in missing_publishers:\n print(fmt.format(topic, ', '.join([x.name for x in topic_to_subscribers[topic]])), file=msg)\n self.logger.error(msg.getvalue())\n\n if not result:\n self.logger.error('Found errors when building adjacency information')\n raise GraphBuildError(\n 'Found errors when building adjacency information / graph edges. Check log for details')\n\n # Now we have enough information to build our edges. 
Phase 1: pub/sub stuff\n for (topic, subscribers) in topic_to_subscribers.items():\n publishers = topic_to_publisher[topic]\n\n for p in publishers:\n for s in subscribers:\n self.edges[p].append(s)\n\n # Phase 2: additional dependencies\n for node in self.nodes.values():\n for dep in node.additional_dependencies:\n src = self.nodes[dep]\n self.edges[src].append(node)", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def generate_random_graph(num_nodes):\n root = Node()\n nodes = set([root])\n edge_count = 0\n num_edges = int(math.log(num_nodes, 1.7)) * num_nodes\n\n for i in range(1, num_nodes):\n node = Node()\n node.edges.add(random.sample(nodes, 1)[0])\n nodes.add(node)\n edge_count += 1\n\n # Generate edges until \n for j in range(edge_count, num_edges):\n tail, head = random.sample(nodes, 2)\n while head in tail.edges:\n tail, head = random.sample(nodes, 2)\n tail.edges.add(head)\n edge_count += 1\n \n # Convert our graph to CSR representation by first creating an adjacency\n # matrix and then transforming it to a CSR\n\n # Generating adjacency matrix\n adjacency_matrix = [[0] * num_nodes for i in range(num_nodes)]\n sums = defaultdict(int)\n stack = [root]\n visited = set()\n while stack:\n curr = stack.pop()\n if curr not in visited:\n visited.add(curr)\n for node in curr.edges:\n stack.append(node)\n adjacency_matrix[curr.id][node.id] = 1.0\n sums[curr.id] += 1\n\n # Adjacency matrix -> CSR\n offset = 0\n csr = [[] for i in range(3)]\n nonzeros = np.nonzero(adjacency_matrix)\n last_row = -1\n for i in range(len(nonzeros[0])):\n row = nonzeros[0][i]\n col = nonzeros[1][i]\n outdegree = sums[row]\n if last_row != row:\n csr[1].append(offset)\n csr[0].append(adjacency_matrix[row][col] / outdegree)\n csr[2].append(col)\n offset += 1\n last_row = row\n csr[1].append(offset)\n\n # Write to txt and pickle\n with open(generate_filepath_txt(num_nodes), \"w\") as fp:\n fp.write(' '.join(str(i) for i in csr[0]) + '\\n')\n fp.write(' '.join(str(i) for i in csr[1]) + '\\n')\n fp.write(' '.join(str(i) for i in csr[2]))\n with open(generate_filepath_pickle(num_nodes), \"wb\") as fp:\n pickle.dump(csr, fp)", "def makeGraph(self):\n self.floorGraph = graph.Graph()\n file = open(\"edges.csv\")\n edges = file.readlines()\n for edge in edges:\n params = edge.split(\",\")\n self.floorGraph.addEdge(params[0],params[1],float(params[2]))\n self.floorGraph.addEdge(params[1],params[0],float(params[2]))", "def create_graph_domain():\n \n \"\"\"\n Fetch data\n \"\"\"\n \n from input.read_input import read_item_data\n df = read_item_data()\n df['item_id'] = df.index\n dct_title = df['title'].to_dict()\n dct_domain = df['domain_id'].to_dict()\n dct_cat= df['category_id'].to_dict()\n \n dct_price = df['price'].to_dict()\n \n \"\"\" Ratio stuff \"\"\" \n from input.create_ratio import get_ratio\n dct_ratio_dom = get_ratio(which='domain_id')\n \n ratio_df = get_ratio(which='item_id',full=True)\n ratio_df['popularity'] = 100.0*ratio_df['bought'] + ratio_df['searched']\n dct_ratio_item_b = ratio_df['popularity'].to_dict()\n \n \n \n \"\"\"\n JSON\n \n \"\"\"\n check = lambda x: x <= np.round(413163*0.8).astype(np.int32)\n \n DATA_PATH = path.join(DATA_DIR,'train_dataset.jl')\n line_i = 0\n \n \n\n \"\"\"\n Create graph vertices\n \"\"\"\n g = ig.Graph() \n from input.read_input import get_mappings\n counter, f_map_func, r_map_func = get_mappings()\n \n num_items = df.shape[0]\n for k in dct_title.keys():\n 
g.add_vertex(value=k,deg=dct_ratio_item_b[k],domain_id=dct_domain[k],price=dct_price[k],cat='item_id')\n\n \"\"\" ['item_id','domain_id','category_id','product_id'] \"\"\"\n \n for k in pd.unique(df['domain_id']):\n g.add_vertex(value=k,cat='domain_id')\n\n\n for k in pd.unique(df['category_id']):\n g.add_vertex(value=k,cat='category_id')\n\n\n for k in pd.unique(df['product_id']):\n g.add_vertex(value=k,cat='product_id')\n\n \n \n \"\"\"\n Create edges\n \"\"\"\n E1 = []\n E2 = []\n \n with jsonlines.open(DATA_PATH) as reader:\n for line_i, obj in enumerate(reader):\n if check(line_i):\n print(line_i)\n L = []\n for h in obj['user_history']:\n if h['event_type'] == 'view':\n #print(\"Viewed {}\".format(dct[h['event_info']]))\n L.append(h['event_info'])\n elif h['event_type'] == 'search':\n #print(\"Searched {}\".format(h['event_info']))\n pass\n L_domain = [dct_domain[k] for k in L]\n L_domain = pd.unique(L_domain)\n L_cat = [dct_cat[k] for k in L]\n L_cat = pd.unique(L_cat)\n \n for i in range(len(L)):\n E1.append(dct_domain[L[i]])\n E2.append(dct_domain[obj['item_bought']] )\n\n \n \n E1 = f_map_func['domain_id'](E1)\n E2 = f_map_func['domain_id'](E2)\n \n \n E = pd.Series(list(zip(E1,E2))).value_counts()\n g.add_edges(E.index)\n g.es[\"weight\"] = E.values\n \n \n g.write_pickle(fname=path.join(DATA_DIR,'graph_domain_to_domain.pkl'))", "def populate_graph(self):\n if self.edges and self.vertices:\n graph = Graph()\n for edge in self.edges:\n graph.add_edge(edge)\n self.graph = graph\n else:\n print(\"Populate edges & vertices first, then populate graph!\")", "def createWeightedGraph(contourdf, file_list, column_name):\r\n start_time_creating_weighted_graph = time.time()\r\n weights = np.full((len(contourdf)), 1) # initialize weights to one\r\n contourdf['weights'] = weights\r\n # group the dataframe to count path_length(number of nodes in the path)\r\n path_length_df = contourdf.groupby(['level', 'path']).size().reset_index(name='path_length')\r\n path_length_1_df = path_length_df[path_length_df['path_length'] == 1]\r\n cntr_data_weight_0 = contourdf[(np.isin(contourdf['level'], path_length_1_df['level'])) &\r\n (np.isin(contourdf['path'], path_length_1_df['path']))]\r\n cntr_data_weight_0['weights'] = 0\r\n cntr_data__weight_1 = contourdf[~(np.isin(contourdf['level'], path_length_1_df['level'])) |\r\n ~(np.isin(contourdf['path'], path_length_1_df['path']))]\r\n\r\n cntr_data_weight_1_diffrence = (cntr_data__weight_1.shift() - cntr_data__weight_1)\r\n cntr_data_weight_1_diffrence['calculated_weight'] = (np.sqrt(\r\n (cntr_data_weight_1_diffrence['node_x'].values) ** 2 + (\r\n cntr_data_weight_1_diffrence['node_y'].values) ** 2).tolist())\r\n\r\n cntr_data__weight_1['calculated_weight'] = cntr_data_weight_1_diffrence['calculated_weight'].tolist()\r\n cntr_data__weight_1['path_diff'] = cntr_data_weight_1_diffrence['path'].tolist()\r\n weight_list = cntr_data__weight_1['calculated_weight'].tolist()\r\n # for index,row in cntr_data__weight_1.iterrows():\r\n # if(row['path_diff'] != 0):\r\n # weight_list[index] = weight_list[index + 1]\r\n indices = cntr_data__weight_1.loc[cntr_data__weight_1['path_diff'] != 0]\r\n for index, row in indices.iterrows():\r\n weight_list[index] = weight_list[index + 1]\r\n cntr_data__weight_1['act2'] = weight_list\r\n cntr_data__weight_1['actual_weight'] = weight_list\r\n cntr_data__weight_1 = cntr_data__weight_1[['level', 'node_x', 'node_y', 'path', 'actual_weight']]\r\n cntr_data_weight_0['actual_weight'] = cntr_data_weight_0['weights']\r\n 
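The routine above persists the domain co-visit graph with write_pickle; below is a minimal, hedged sketch of loading that file back and inspecting the heaviest viewed-domain to bought-domain edges. python-igraph and the same DATA_DIR used above are assumed to be importable; the attribute names "value" and "weight" are taken from the construction code above.

from os import path
import igraph as ig

# Hedged usage sketch: read the pickle written by create_graph_domain() and list
# the ten heaviest viewed-domain -> bought-domain transitions.
g = ig.Graph.Read_Pickle(path.join(DATA_DIR, 'graph_domain_to_domain.pkl'))
top_edges = sorted(g.es, key=lambda e: e["weight"], reverse=True)[:10]
for e in top_edges:
    print(g.vs[e.source]["value"], "->", g.vs[e.target]["value"], e["weight"])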
cntr_data_weight_0 = cntr_data_weight_0[['level', 'node_x', 'node_y', 'path', 'actual_weight']]\r\n weighted_df = pd.concat([cntr_data_weight_0, cntr_data__weight_1])\r\n weighted_df = weighted_df.sort_values(['level', 'path'])\r\n weighted_df['aggregated_weight'] = weighted_df.groupby(['level', 'path'])['actual_weight'].transform('sum')\r\n weighted_df = weighted_df[['level', 'node_x', 'node_y', 'path', 'aggregated_weight', 'actual_weight']]\r\n weighted_df['normalized'] = (weighted_df['aggregated_weight'] - weighted_df['aggregated_weight'].min()) / (\r\n weighted_df['aggregated_weight'].max() - weighted_df['aggregated_weight'].min())\r\n\r\n data = fetch_direction(file_list, column_name)\r\n\r\n data['node_x_1'] = data['longitude']\r\n data['node_y_1'] = data['latitude']\r\n\r\n weighted_df['node_x_1'] = weighted_df['node_x'] // 1\r\n weighted_df['node_y_1'] = weighted_df['node_y'] // 1\r\n\r\n merged_df = weighted_df.merge(data, how='left')\r\n merged_df = merged_df[['res_x', 'res_y', 'node_x_1', 'node_y_1']]\r\n\r\n weighted_df['res_dir_x'] = merged_df['res_x'].tolist()\r\n weighted_df['res_dir_y'] = merged_df['res_y'].tolist()\r\n\r\n weighted_df['res_dir_x_1'] = weighted_df['res_dir_x'] * weighted_df['actual_weight']\r\n weighted_df['res_dir_y_1'] = weighted_df['res_dir_y'] * weighted_df['actual_weight']\r\n\r\n weighted_df['res_dir_x_1'] = weighted_df.groupby(['level', 'path'])['res_dir_x_1'].transform('sum') / weighted_df[\r\n 'aggregated_weight']\r\n weighted_df['res_dir_y_1'] = weighted_df.groupby(['level', 'path'])['res_dir_y_1'].transform('sum') / weighted_df[\r\n 'aggregated_weight']\r\n\r\n weighted_df['resultant'] = weighted_df['res_dir_x_1'] + weighted_df['res_dir_y_1']\r\n weighted_df['mag'] = np.sqrt(np.square(weighted_df['res_dir_x_1']) + np.square(weighted_df['res_dir_y_1']))\r\n\r\n print(\"For creating a weighted graph %s seconds\" % (time.time() - start_time_creating_weighted_graph))\r\n\r\n return weighted_df", "def get_data(nodes=[]):\n\n # get nodes\n if not nodes:\n nodes = mc.ls(sl=1)\n\n # decipher if the nodes are constraints themselves or are driven by constraints\n nodes = mc.ls(nodes)\n constraints = [n for n in nodes if mc.nodeType(n) in constraint_types]\n non_con_nodes = [n for n in nodes if n not in constraints]\n constraints.extend(utils.get_constraints(non_con_nodes))\n\n data = {}\n\n for constraint in constraints:\n\n # get driven target nodes\n ntype = mc.nodeType(constraint)\n constraint_func = get_constraint_func(ntype)\n driven = mc.listConnections(constraint+'.constraintParentInverseMatrix') or []\n drivers = constraint_func(constraint, q=1, tl=1)\n\n if not ntype in constraint_types or not driven or not drivers:\n continue\n\n driven = list(set(driven))\n weight_alias_list = constraint_func(constraint, q=1, wal=1)\n\n con_data = {\n 'con_type': ntype,\n 'drivers': drivers,\n 'driven': driven,\n 'weight_list': [mc.getAttr(constraint+'.'+w) for w in weight_alias_list]\n }\n\n # Create dict entry for constrant types with upvectors\n if ntype in ['aimConstraint', 'tangentConstraint', 'normalConstraint']:\n\n aim = constraint_func(constraint, q=1, aim=1)\n upv = constraint_func(constraint, q=1, u=1)\n wupv = constraint_func(constraint, q=1, wu=1)\n wut = constraint_func(constraint, q=1, wut=1)\n wuo = constraint_func(constraint, q=1, wuo=1)\n\n if type(wuo) == list:\n wuo = wuo[0]\n\n con_data['aim'] = aim\n con_data['u'] = upv\n con_data['wu'] = wupv\n con_data['wut'] = wut\n con_data['wuo'] = wuo\n\n if mc.objExists(constraint+'.interpType'):\n 
con_data['interp_type'] = mc.getAttr(constraint+'.interpType')\n\n data[constraint] = con_data\n\n return data", "def build_square_test_graph_with_costs(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_node()\n graph.new_node()\n graph.new_edge(1, 2, 2)\n graph.new_edge(1, 4, 10)\n graph.new_edge(2, 3, 3)\n graph.new_edge(3, 4, 1)\n\n return graph", "def get(self):\n self.network = gt.load_graph(self.dotfile)\n\n if self.strongcomponent:\n self.network=gt.extract_largest_component(\n self.network, directed=True, prune=True)\n\n if self.removeselfloops:\n gt.remove_self_loops(self.network)\n\n self.nm = self.network.new_vertex_property(\"string\")\n nm2 = self.network.new_vertex_property(\"string\")\n self.hl = self.network.new_vertex_property(\"bool\")\n self.network.vertex_properties[\"text\"] = self.nm\n self.network.vertex_properties[\"text\"] = nm2\n names=[]\n for v in self.network.vertices():\n if v.out_degree() > -1:\n self.nm[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.hl[v]=False\n else:\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.nm[v]=''\n self.hl[v]=False\n names=names+[nm2[v]]\n\n NAMES=pd.Series(list(set(names)),\n name='varclass').reset_index().set_index('varclass')\n self.varclass = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"varclass\"] = self.varclass\n for v in self.network.vertices():\n self.varclass[v]=NAMES.loc[nm2[v]].values[0]\n\n self.od = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.od\n for v in self.network.vertices():\n self.od[v]=self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=self.exponent)+5\n self.ods = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.ods\n for v in self.network.vertices():\n self.ods[v]=1*self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=1)+2\n\n self.ew = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight\"] = self.ew\n for e in self.network.edges():\n self.ew[e]=float(self.network.ep.weight[e])**1\n\n self.ew_pen = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight_pen\"] = self.ew_pen\n for e in self.network.edges():\n self.ew_pen[e]=4/(1 + np.exp(-.05-np.fabs(float(self.network.ep.weight[e]))))\n\n self.e_marker = self.network.new_edge_property(\"string\")\n self.network.edge_properties[\"e_marker\"] = self.e_marker\n for e in self.network.edges():\n if float(self.network.ep.weight[e]) < 0:\n self.e_marker[e]='bar'\n else:\n self.e_marker[e]='arrow'\n\n self.deg = self.network.degree_property_map(\"out\")\n\n self.ecol = self.network.new_edge_property(\"vector<double>\")\n self.network.edge_properties[\"ecol\"] = self.ecol\n for e in self.network.edges():\n col=cm.ScalarMappable(mpl.colors.Normalize(vmin=-self.edgecollim,\n vmax=self.edgecollim),\n cmap=self.edgecolmap).to_rgba(float(self.ew[e]))\n col=list(col)\n col[3]=self.edgealpha\n self.ecol[e]=tuple(col)\n\n self.pos = gt.graphviz_draw(self.network,\n overlap=False,\n vsize=20,\n sep=self.nodesep,\n output=None)\n\n self.control = self.network.new_edge_property(\"vector<double>\")\n for e in self.network.edges():\n d = np.sqrt(np.sum((self.pos[e.source()].a\n - self.pos[e.target()].a) ** 2))\n 
d=d/2\n self.control[e] = [0.0,0.0,0, .2*d, 0.5, d,1,0]\n\n if self.outfile is not None:\n gt.graph_draw(self.network,nodesfirst=False,\n pos=self.pos,\n vertex_halo=self.hl,\n vertex_halo_color=[.2,.2,.2,.1],\n edge_pen_width=self.ew_pen,\n edge_end_marker=self.e_marker,\n vorder=self.deg,\n edge_marker_size=10,\n vertex_color=self.varclass,#[.5,.5,.5,.3],\n edge_color=self.ecol,#[.5,.5,.5,.5],\n vertex_pen_width=1.5,\n vertex_size=self.od,\n vertex_text=self.nm,\n vcmap=(self.cmap,self.alpha),\n edge_control_points=self.control,\n vertex_fill_color=self.varclass,#deg,\n vertex_font_size=self.ods,\n vertex_text_color=[.1,.1,.1,.8],\n #vertex_text_position=0,\n output=self.outfile)", "def bgll(self, graph, node_count, min_mod, max_pass):\n\n #the belonging of the node\n bl = [i for i in range(node_count)]\n #the node's weight in community\n _in = [0.0] * node_count\n #the node's weight in graph\n _tot = []\n #total weight of a node, just a copy of _tot\n k = []\n #the total weight of the graph\n m = []\n\n #inital the in-param\n network = [[0.0] * node_count for n in range(node_count)]\n for node, tag, weight in graph:\n network[node][tag] = weight\n for node in network:\n k.append(sum(node))\n _tot = k[:]\n m = sum(k)\n #inital the in-param\n\n def modularity():\n \"\"\"\n This function mainly computes the modularity of the network\n Return:\n mod->the modularity value\n \"\"\"\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q\n\n def modularity_gain(n, c, dnodecomm):\n \"\"\"\n This function mainly computes the modularity gain of a node moving\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n Return:\n gain->modularity gain\n \"\"\"\n\n totc = _tot[c]\n degc = k[n]\n return dnodecomm - (totc * degc) / m\n\n def neigh_comm(n):\n \"\"\"\n This function mainly computes the weight between the node and it's neighbour community\n Param:\n n->node id\n Return:\n nc->the map of the weight between the node and it's neighbour community\n nc=>{cid,weight}\n \"\"\"\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc\n\n def insert(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of insert the node into community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] += k[n]\n _in[c] += 2 * dnodecomm + network[n][n]\n bl[n] = c\n\n def remove(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of remove the node off community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] -= k[n]\n _in[c] -= 2 * dnodecomm + network[n][n]\n bl[n] = -1\n\n def detect():\n \"\"\"\n This function mainly detect the community of the graph.\n \"\"\"\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre 
> best_incre:\n                            best_incre = incre\n                            best_c = c\n                            best_l = ncomm[c]\n                    insert(n, best_c, best_l)\n                    if best_c != nc:\n                        _improve = True\n                new_mod = modularity()\n                print(new_mod)\n\n        detect()\n        return bl
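A hedged usage sketch for the Louvain-style bgll routine above: its body never dereferences self, so a placeholder first argument is enough, and each undirected edge is listed in both directions so the dense adjacency matrix built inside bgll is symmetric.

# Hedged usage sketch; bgll is assumed reachable as a plain function here
# (if it is defined on a class C, call C.bgll(None, ...) or use an instance).
edges = [(0, 1, 1.0), (1, 0, 1.0),
         (1, 2, 1.0), (2, 1, 1.0),
         (0, 2, 1.0), (2, 0, 1.0),
         (3, 4, 1.0), (4, 3, 1.0),
         (2, 3, 0.1), (3, 2, 0.1)]
labels = bgll(None, edges, node_count=5, min_mod=1e-7, max_pass=10)
print(labels)  # one community label per node, e.g. nodes {0,1,2} vs {3,4}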
the student schedule based on whether or not there is daycare\n\t# for the given school type\n\tstudent_schedule = generate_student_schedule(school_type, N_classes,\n\t\t\t\t\t\tclass_size)\n\n\t# create teacher links due to team-teaching (currently only relevant for\n\t# lower secondary and upper secondary)\n\tset_teacher_teacher_teamteaching_contacts(G, school_type, teacher_schedule)\n\n\t# create links between teachers and students based on the teaching schedule\n\tset_teacher_student_teaching_contacts(G, school_type, N_classes, \n\t\tteacher_schedule, student_schedule)\n\n\t# generate links between teachers that supervise groups during daycare\n\t# together\n\tset_teacher_teacher_daycare_supervision_contacts(G, school_type, \n\t\tteacher_schedule)\n\n\t# create links between the teachers supervising the afternoon groups and\n\t# all students in the afternoon groups. Note: the information about \n\t# which students are in which afternoon group are taken from the student\n\t# schedule, because students are assigned to afternoon groups at random.\n\tset_teacher_student_daycare_supervision_contacts(G, school_type, N_classes, \n\t\tteacher_schedule, student_schedule)\n\n\t# add student contacts based on the groups they belong to druing the \n\t# afternoon daycare. Only relevant for schools with daycare\n\tset_student_student_daycare_contacts(G, school_type, student_schedule)\n\n\t#teacher_schedule = teacher_schedule.reset_index()\n\t#student_schedule = student_schedule.reset_index() \n\treturn G, teacher_schedule, student_schedule", "def generate_weighted_graph():\n \n Adj_Matrix = np.array([\n [0.0, 0.2, 0.2, 0.3, 0.2, 0.1],\n [0.1, 0.0, 0.3, 0.3, 0.1, 0.2],\n [0.3, 0.2, 0.0, 0.1, 0.2, 0.2],\n [0.1, 0.4, 0.2, 0.0, 0.2, 0.1],\n [0.2, 0.2, 0.2, 0.2, 0.0, 0.2],\n [0.2, 0.1, 0.1, 0.3, 0.3, 0.0]\n ])\n\n return Adj_Matrix", "def _instantiate_learning_graph(self, context=None):\n\n self.learningGraph = OrderedDict()\n self.learningExecutionGraph = OrderedDict()\n\n def build_dependency_sets_by_traversing_projections(sender_mech, process):\n\n # MappingProjections are legal recipients of learning projections (hence the call)\n # but do not send any projections, so no need to consider further\n from PsyNeuLink.Components.Projections.MappingProjection import MappingProjection\n if isinstance(sender_mech, MappingProjection):\n return\n\n # All other sender_mechs must be either a MonitoringMechanism or an ObjectiveMechanism with role=LEARNING\n elif not (isinstance(sender_mech, LearningMechanism) or\n (isinstance(sender_mech, ObjectiveMechanism) and sender_mech.role is LEARNING)):\n raise SystemError(\"PROGRAM ERROR: {} is not a legal object for learning graph;\"\n \"must be a LearningMechanism or an ObjectiveMechanism\".\n format(sender_mech))\n\n\n # MODIFIED 3/12/17 NEW:\n\n # MANAGE TARGET ObjectiveMechanism FOR INTERNAL or TERMINAL CONVERGENCE of PATHWAYS\n\n # If sender_mech is an ObjectiveMechanism, and:\n # - none of the mechanisms that project to it are are a TERMINAL mechanism for the current process, or\n # - all of the mechanisms that project to it already have an ObjectiveMechanism, then:\n # - do not include the ObjectiveMechanism in the graph;\n # - be sure that its outputState projects to the ERROR_SIGNAL inputState of a LearningMechanism\n # (labelled \"learning_mech\" here -- raise an exception if it does not;\n # - determine whether learning_mech's ERROR_SIGNAL inputState receives any other projections\n # from another ObjectiveMechanism or LearningMechanism (labelled 
\"error_signal_projection\" here)\n # -- if it does, be sure that it is from the same system and if so return;\n # (note: this shouldn't be true, but the test is here for completeness and sanity-checking)\n # - if learning_mech's ERROR_SIGNAL inputState does not receive any projections from\n # another objectiveMechanism and/or LearningMechanism in the system, then:\n # - find the sender to the ObjectiveMechanism (labelled \"error_source\" here)\n # - find the 1st projection from error_source that projects to the ACTIVATION_INPUT inputState of\n # a LearningMechanism (labelled \"error_signal\" here)\n # - instantiate a MappingProjection from error_signal to learning_mech\n # projected\n # IMPLEMENTATION NOTE: Composition should allow 1st condition if user indicates internal TARGET is desired;\n # for now, however, assuming this is not desired (i.e., only TERMINAL mechanisms\n # should project to ObjectiveMechanisms) and always replace internal\n # ObjectiveMechanism with projection from a LearningMechanism (if it is available)\n\n # FIX: RELABEL \"sender_mech\" as \"obj_mech\" here\n\n if isinstance(sender_mech, ObjectiveMechanism) and len(self.learningExecutionGraph):\n\n # TERMINAL CONVERGENCE\n # All of the mechanisms that project to sender_mech\n # project to another ObjectiveMechanism already in the learning_graph\n if all(\n any(\n (isinstance(receiver_mech, ObjectiveMechanism) and\n # its already in a dependency set in the learningExecutionGraph\n receiver_mech in set.union(*list(self.learningExecutionGraph.values())) and\n not receiver_mech is sender_mech)\n # receivers of senders to sender_mech\n for receiver_mech in [proj.receiver.owner for proj in\n mech.outputState.sendsToProjections])\n # senders to sender_mech\n for mech in [proj.sender.owner\n for proj in sender_mech.inputStates[SAMPLE].receivesFromProjections]):\n\n # Get the ProcessingMechanism that projected to sender_mech\n error_source_mech = sender_mech.inputStates[SAMPLE].receivesFromProjections[0].sender.owner\n\n # Get the other ObjectiveMechanism to which the error_source projects (in addition to sender_mech)\n other_obj_mech = next((projection.receiver.owner for projection in\n error_source_mech.outputState.sendsToProjections if\n isinstance(projection.receiver.owner, ObjectiveMechanism)), None)\n sender_mech = other_obj_mech\n\n # INTERNAL CONVERGENCE\n # None of the mechanisms that project to it are a TERMINAL mechanism\n elif not all(all(projection.sender.owner.processes[proc] is TERMINAL\n for proc in projection.sender.owner.processes)\n for projection in sender_mech.inputStates[SAMPLE].receivesFromProjections):\n\n # Get the LearningMechanism to which the sender_mech projected\n try:\n learning_mech = sender_mech.outputState.sendsToProjections[0].receiver.owner\n if not isinstance(learning_mech, LearningMechanism):\n raise AttributeError\n except AttributeError:\n raise SystemError(\"{} does not project to a LearningMechanism in the same process {}\".\n format(sender_mech.name, process.name))\n\n from PsyNeuLink.Components.Mechanisms.AdaptiveMechanisms.LearningMechanisms.LearningAuxilliary \\\n import ACTIVATION_INPUT, ERROR_SIGNAL\n\n # Get the ProcessingMechanism that projected to sender_mech\n error_source_mech = sender_mech.inputStates[SAMPLE].receivesFromProjections[0].sender.owner\n\n # Get the other LearningMechanism to which the error_source projects (in addition to sender_mech)\n error_signal_mech = next((projection.receiver.owner for projection in\n error_source_mech.outputState.sendsToProjections 
if\n projection.receiver.name is ACTIVATION_INPUT), None)\n\n\n # Check if learning_mech receives an error_signal_projection\n # from any other ObjectiveMechanism or LearningMechanism in the system;\n # If it does, get the first one found\n error_signal_projection = next ((projection for projection\n in learning_mech.inputStates[ERROR_SIGNAL].receivesFromProjections\n if (isinstance(projection.sender.owner,(ObjectiveMechanism,\n LearningMechanism)) and\n not projection.sender.owner is sender_mech and\n self in projection.sender.owner.systems.values())), None)\n # If learning_mech receives another error_signal projection,\n # reassign sender_mech to the sender of that projection\n if error_signal_projection:\n if self.verbosePref:\n warnings.warn(\"Although {} a TERMINAL mechanism for the {} process, it is an \"\n \"internal mechanism for other proesses in the {} system; therefore \"\n \"its ObjectiveMechanism ({}) will be replaced with the {} LearningMechanism\".\n format(error_source_mech.name,\n process.name,\n self.name,\n sender_mech.name,\n error_signal_mech))\n sender_mech = error_signal_projection.sender.owner\n\n # FIX: FINISH DOCUMENTATION HERE ABOUT HOW THIS IS DIFFERENT THAN ABOVE\n if error_signal_mech is None:\n raise SystemError(\"Could not find projection to an {} inputState of a LearningMechanism for \"\n \"the ProcessingMechanism ({}) that projects to {} in the {} process\"\n \"\".format(ACTIVATION_INPUT,\n error_source_mech.name,\n sender_mech.name,\n process.name))\n else:\n mp = MappingProjection(sender=error_signal_mech.outputStates[ERROR_SIGNAL],\n receiver=learning_mech.inputStates[ERROR_SIGNAL],\n matrix=IDENTITY_MATRIX)\n if mp is None:\n raise SystemError(\"Could not instantiate a MappingProjection \"\n \"from {} to {} for the {} process\".\n format(error_signal_mech.name, learning_mech.name))\n\n sender_mech = error_signal_mech\n # MODIFIED 3/12/17 END\n\n\n # Delete any projections to mechanism from processes or mechanisms in processes not in current system\n for input_state in sender_mech.inputStates.values():\n for projection in input_state.receivesFromProjections:\n sender = projection.sender.owner\n system_processes = self.processes\n if isinstance(sender, Process):\n if not sender in system_processes:\n del projection\n elif not all(sender_process in system_processes for sender_process in sender.processes):\n del projection\n\n # If sender_mech has no projections left, raise exception\n if not any(any(projection for projection in input_state.receivesFromProjections)\n for input_state in sender_mech.inputStates.values()):\n raise SystemError(\"{} only receives projections from other processes or mechanisms not\"\n \" in the current system ({})\".format(sender_mech.name, self.name))\n\n for outputState in sender_mech.outputStates.values():\n\n for projection in outputState.sendsToProjections:\n receiver = projection.receiver.owner\n try:\n self.learningGraph[receiver].add(sender_mech)\n except KeyError:\n self.learningGraph[receiver] = {sender_mech}\n\n # Use toposort to test whether the added dependency produced a cycle (feedback loop)\n # Do not include dependency (or receiver on sender) in learningExecutionGraph for this projection\n # and end this branch of the traversal if the receiver has already been encountered,\n # but do mark for initialization\n # Notes:\n # * This is because it is a feedback connection, which introduces a cycle into the learningGraph\n # that precludes use of toposort to determine order of execution;\n # however, the feedback 
projection will still be used during execution\n # so the sending mechanism should be designated as INITIALIZE_CYCLE\n # * Check for receiver mechanism and not its tuple,\n # since the same mechanism can appear in more than one tuple (e.g., with different phases)\n # and would introduce a cycle irrespective of the tuple in which it appears in the learningGraph\n\n if receiver in self.learningExecutionGraph:\n # if receiver in self.learning_execution_graph_mechs:\n # Try assigning receiver as dependent of current mechanism and test toposort\n try:\n # If receiver already has dependencies in its set, add sender_mech to set\n if self.learningExecutionGraph[receiver]:\n self.learningExecutionGraph[receiver].add(sender_mech)\n # If receiver set is empty, assign sender_mech to set\n else:\n self.learningExecutionGraph[receiver] = {sender_mech}\n # Use toposort to test whether the added dependency produced a cycle (feedback loop)\n list(toposort(self.learningExecutionGraph))\n # If making receiver dependent on sender produced a cycle, remove from learningGraph\n except ValueError:\n self.learningExecutionGraph[receiver].remove(sender_mech)\n receiver.systems[self] = CYCLE\n continue\n\n else:\n # Assign receiver as dependent on sender mechanism\n try:\n # FIX: THIS WILL ADD SENDER_MECH IF RECEIVER IS IN GRAPH BUT = set()\n # FIX: DOES THAT SCREW UP ORIGINS?\n self.learningExecutionGraph[receiver].add(sender_mech)\n except KeyError:\n self.learningExecutionGraph[receiver] = {sender_mech}\n\n if not sender_mech.systems:\n sender_mech.systems[self] = MONITORING\n\n # Traverse list of mechanisms in process recursively\n build_dependency_sets_by_traversing_projections(receiver, process)\n\n # Sort for consistency of output\n sorted_processes = sorted(self.processes, key=lambda process : process.name)\n\n # This assumes that the first mechanism in process.monitoringMechanisms is the last in the learning sequence\n # (i.e., that the list is being traversed \"backwards\")\n for process in sorted_processes:\n if process.learning and process._learning_enabled:\n build_dependency_sets_by_traversing_projections(process.monitoringMechanisms[0], process)\n\n # FIX: USE TOPOSORT TO FIND, OR AT LEAST CONFIRM, TARGET MECHANISMS, WHICH SHOULD EQUAL COMPARATOR MECHANISMS\n self.learningExecutionList = toposort_flatten(self.learningExecutionGraph, sort=False)\n # self.learningExecutionList = self._toposort_with_ordered_mech_tuples(self.learningExecutionGraph)\n\n # Construct monitoringMechanisms and targetMechanisms MechanismLists\n\n # MODIFIED 3/12/17 NEW: [MOVED FROM _instantiate_graph]\n self._monitoring_mech_tuples = []\n self._target_mech_tuples = []\n\n from PsyNeuLink.Components.Projections.MappingProjection import MappingProjection\n for item in self.learningExecutionList:\n if isinstance(item, MappingProjection):\n continue\n\n # If a learning_rate has been specified for the system, assign that to all LearningMechanisms\n # for which a mechanism-specific learning_rate has NOT been assigned\n if (isinstance(item, LearningMechanism) and\n self.learning_rate is not None and\n item.function_object.learning_rate is None):\n item.function_object.learning_rate = self.learning_rate\n\n mech_tuple = self._allMechanisms._get_tuple_for_mech(item)\n if not mech_tuple in self._monitoring_mech_tuples:\n self._monitoring_mech_tuples.append(mech_tuple)\n if isinstance(item, ObjectiveMechanism) and not mech_tuple in self._target_mech_tuples:\n self._target_mech_tuples.append(mech_tuple)\n self.monitoringMechanisms = 
MechanismList(self, self._monitoring_mech_tuples)\n self.targetMechanisms = MechanismList(self, self._target_mech_tuples)\n # MODIFIED 3/12/17 END\n\n # Instantiate TargetInputStates\n self._instantiate_target_inputs()", "def generate_computational_graph(RHS, schema):\n computational_graph=dict()\n for level in range(3):\n #use brute force to generate candidates for each level\n computational_graph[level]=[]\n if level== 0:\n for attribute in schema:\n if attribute !=RHS:\n computational_graph[level].append(set([attribute]))\n\n else:\n for element1 in computational_graph[level-1]:\n for element2 in computational_graph[0]:\n newelement = element1.union(element2)\n if newelement not in computational_graph[level]:\n if len(newelement)==level+1:\n computational_graph[level].append(newelement)\n\n return computational_graph", "def __init__(self, data=None, cliques=None, taxons=None, namesMap=None, **attr):\n \n if cliques is not None:\n if namesMap:\n nmap = namesMap.getMap()\n cliques = [ [ nmap[n] for n in nset ] for nset in cliques ]\n \n # prevent self-loops\n cliques = [ list(set(nset)) for nset in cliques ]\n \n # attributes dicts\n hyperb_weight = lambda ts: 1/(ts-1) \n e_attr_hyperbWeight=dict()\n e_attr_taxon=dict()\n e_attr_count=dict()\n \n # build edges from records\n if taxons is None: cliques_taxons = map( lambda c: (c,None), cliques)\n else: cliques_taxons = zip(cliques,taxons)\n for clique,taxon in cliques_taxons:\n teamsize=len(clique)\n edgesFromClique = itertools.combinations(clique,2)\n for e in edgesFromClique:\n e = tuple(sorted(e))\n e_attr_count[e] = e_attr_count.get(e,0)+1\n e_attr_taxon[e] = e_attr_taxon.get(e,[])+[taxon] if taxons is not None else None\n e_attr_hyperbWeight[e] = e_attr_hyperbWeight.get(e,0)+hyperb_weight(teamsize)\n \n edges = e_attr_count.keys()\n data = list(edges)\n \n super().__init__(incoming_graph_data=data,**attr)\n \n # insert nodes and set count attribute\n nodes_counts = Counter( col for clique in cliques for col in clique )\n nodes = nodes_counts.keys()\n \n self.add_nodes_from(nodes)\n networkx.set_node_attributes(self,values=nodes_counts,name='count')\n \n # set edges attributes\n networkx.set_edge_attributes(self,e_attr_count,'count')\n networkx.set_edge_attributes(self,e_attr_taxon,'taxons')\n networkx.set_edge_attributes(self,e_attr_hyperbWeight,'weight_hyperbolic')", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W = tf.get_variable(\"W\", shape=[self.C, self.N], initializer=tf.truncated_normal_initializer)\n self.b = tf.get_variable(\"b\", shape=[self.C, 1], initializer=tf.zeros_initializer)\n\n self.z = tf.matmul(self.W, self.X) + self.b\n self.y_hat = tf.nn.softmax(self.z, dim=0)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z, dim=0))\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def fetch_from_sqlite(self):\n conn = get_sqlite()\n c = conn.cursor()\n c.execute('SELECT * FROM vertices ORDER BY id')\n vertices =c.fetchall()\n c.execute('SELECT * FROM edges')\n edges =c.fetchall()\n conn.commit()\n\n self.graph.add_vertices(len(vertices))\n for one in vertices:\n id =int(one[0])\n self.graph.vs[id][\"name\"] = one[1]\n self.graph.vs[id][\"parent\"] = one[2]\n 
self.graph.vs[id][\"size\"] = one[3]\n self.graph.vs[id][\"last_modified\"] = one[4]\n self.graph.vs[id][\"last_accessed\"] = one[5]\n\n for one in edges:\n self.graph.add_edges([(one[0],one[1])])", "def draw_relation_graph(database_name, table_name, primary_key, group_name) -> Graph:\n\n nodes = []\n links = []\n disease_list = get_icd_diseasegroup_diseaseinfo(database_name, table_name, primary_key, group_name)[1]\n disease_list = disease_list.split(',')\n # print(disease_list)\n\n for disease in disease_list:\n disease_node = {\n \"name\": disease,\n \"symbolSize\": 50\n }\n\n if disease_node not in nodes:\n nodes.append(disease_node)\n\n gene_list = get_mesh_disease_info(database_name, 'mesh_gene', disease, 'DISEASE_ID')[1]\n gene_list = gene_list.split(',')\n for gene in gene_list:\n gene_node = {\n 'name': gene,\n 'symbolSize': 10\n }\n\n if gene_node not in nodes:\n nodes.append(gene_node)\n\n for gene in gene_list:\n links.append({\"source\": disease, \"target\": gene})\n\n print(nodes)\n print(links)\n\n c = (\n Graph(init_opts=opts.InitOpts(width=\"1440px\", height=\"900px\")).add(\"\", nodes, links, repulsion=3000)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"gene-disease association network\"))\n )\n\n return c", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = total_params(tf.trainable_variables())\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def _generate_weighted_matrices(self):\n self.degree_weighted_matrices = dict()\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append({'matrix': matrix, 'w': self.w, 'degree_fwd': self.out_degree[metaedge],\n 'degree_rev': self.in_degree[metaedge]})\n res = parallel_process(array=args, function=mt.weight_by_degree, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n for metaedge, matrix in zip(mes, res):\n self.degree_weighted_matrices[metaedge] = matrix", "def metis(W, levels, rid=None):\n # Function written by M. 
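fetch_from_sqlite above reads fixed column positions, which implies a particular table layout; here is a hedged reconstruction of that layout. Table and column names are inferred from the queries and tuple indices used above, and the database file name is hypothetical (get_sqlite() is assumed to return a connection to such a file).

import sqlite3

# Hedged sketch of the schema fetch_from_sqlite() appears to expect.
conn = sqlite3.connect("graph.db")  # hypothetical file name
conn.executescript("""
CREATE TABLE IF NOT EXISTS vertices (
    id            INTEGER PRIMARY KEY,  -- one[0]; ids assumed contiguous from 0,
                                        -- since add_vertices(len(vertices)) indexes by id
    name          TEXT,                 -- one[1]
    parent        TEXT,                 -- one[2]
    size          INTEGER,              -- one[3]
    last_modified TEXT,                 -- one[4]
    last_accessed TEXT                  -- one[5]
);
CREATE TABLE IF NOT EXISTS edges (
    source INTEGER,                     -- one[0]
    target INTEGER                      -- one[1]
);
""")
conn.commit()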
Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L34\n\n N, N = W.shape\n if rid is None:\n rid = np.random.permutation(range(N))\n parents = []\n degree = W.sum(axis=0) - W.diagonal()\n graphs = []\n graphs.append(W)\n #supernode_size = np.ones(N)\n #nd_sz = [supernode_size]\n #count = 0\n\n #while N > maxsize:\n for _ in range(levels):\n\n #count += 1\n\n # CHOOSE THE WEIGHTS FOR THE PAIRING\n # weights = ones(N,1) # metis weights\n weights = degree # graclus weights\n # weights = supernode_size # other possibility\n weights = np.array(weights).squeeze()\n\n # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR\n idx_row, idx_col, val = scipy.sparse.find(W)\n perm = np.argsort(idx_row)\n rr = idx_row[perm]\n cc = idx_col[perm]\n vv = val[perm]\n cluster_id = metis_one_level(rr,cc,vv,rid,weights) # rr is ordered\n parents.append(cluster_id)\n\n # TO DO\n # COMPUTE THE SIZE OF THE SUPERNODES AND THEIR DEGREE \n #supernode_size = full( sparse(cluster_id, ones(N,1) ,\n #\tsupernode_size ) )\n #print(cluster_id)\n #print(supernode_size)\n #nd_sz{count+1}=supernode_size;\n\n # COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH\n nrr = cluster_id[rr]\n ncc = cluster_id[cc]\n nvv = vv\n Nnew = cluster_id.max() + 1\n # CSR is more appropriate: row,val pairs appear multiple times\n W = scipy.sparse.csr_matrix((nvv,(nrr,ncc)), shape=(Nnew,Nnew))\n W.eliminate_zeros()\n # Add new graph to the list of all coarsened graphs\n graphs.append(W)\n N, N = W.shape\n\n # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)\n degree = W.sum(axis=0)\n #degree = W.sum(axis=0) - W.diagonal()\n\n # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISTED AT THE NEXT PASS\n #[~, rid]=sort(ss); # arthur strategy\n #[~, rid]=sort(supernode_size); # thomas strategy\n #rid=randperm(N); # metis/graclus strategy\n ss = np.array(W.sum(axis=0)).squeeze()\n rid = np.argsort(ss)\n\n return graphs, parents", "def create_computational_graph(node):\n graph = CompGraph()\n graph.build_graph(node)\n return graph", "def gen_W(users, items, ratings):\n\n # initiate graph\n user = users.tolist()\n item = items.tolist()\n rating = ratings.tolist()\n B = nx.Graph()\n B.add_nodes_from(user, bipartite=0)\n B.add_nodes_from(item, bipartite=1)\n\n # create edges\n for i in range(len(user)):\n B.add_edges_from([(user[i], item[i])])\n B[user[i]][item[i]]['weight'] = rating[i]\n\n users_unique = users.unique() \n items_unique = items.unique()\n\n # BiAdjacency matrix - for bipartite network\n W = biadjacency_matrix(B, users_unique,items_unique).toarray()\n\n # sparce form of Biadjacency matrix\n W = spa.csr_matrix(W)\n print('Shape of W: '+str(W.shape))\n\n return W, users_unique, items_unique", "def create(self):\n\n self.init_nodes_db()\n\n # group data\n if len(self.depends) == 0:\n grouped = [((), self.data)]\n else:\n grouped = self.data.groupby(self.depends)\n\n # create all the pymc nodes\n for uniq_elem, grouped_data in grouped:\n if not isinstance(uniq_elem, tuple):\n uniq_elem = (uniq_elem,)\n\n # create new kwargs to pass to the new pymc node\n kwargs = self.kwargs.copy()\n\n # update kwarg with the right parent\n for name, parent in self.parents.items():\n kwargs[name] = parent.get_node(self.depends, uniq_elem)\n\n # get node name\n tag, subj_idx = self.create_tag_and_subj_idx(self.depends, uniq_elem)\n node_name = self.create_node_name(tag, subj_idx=subj_idx)\n\n # get value for observed node\n if self.observed:\n if self.pass_dataframe:\n kwargs[\"value\"] = grouped_data[\n self.col_name\n ] # 
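A hedged usage sketch of the coarsening routine above on a small deterministic graph. metis_one_level, which is referenced but not shown here, is assumed to be importable from the same module as metis().

import numpy as np
import scipy.sparse

# Hedged usage sketch: coarsen an undirected 16-node ring twice.
N = 16
rows = np.arange(N)
cols = (rows + 1) % N
ring = scipy.sparse.coo_matrix((np.ones(N), (rows, cols)), shape=(N, N))
W = scipy.sparse.csr_matrix(ring + ring.T)  # symmetric adjacency, no self-loops

graphs, parents = metis(W, levels=2)
print([g.shape[0] for g in graphs])  # node count of the original graph and each coarser level
print(len(parents))                  # one cluster-assignment array per coarsening level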
.to_records(index=False)\n else:\n kwargs[\"value\"] = grouped_data[\n self.col_name\n ].values # .to_records(index=False)\n\n # Deterministic nodes require a parent argument that is a\n # dict mapping parent names to parent nodes. Knode wraps\n # this; so here we have to fish out the parent nodes from\n # kwargs, put them into a parent dict and put that back\n # into kwargs, which will make pm.Determinstic() get a\n # parent dict as an argument.\n if self.pymc_node is pm.Deterministic:\n parents_dict = {}\n for name, parent in self.parents.items():\n parents_dict[name] = parent.get_node(self.depends, uniq_elem)\n kwargs.pop(name)\n kwargs[\"parents\"] = parents_dict\n\n if self.observed:\n kwargs[\"parents\"][\"value\"] = kwargs[\"value\"]\n\n # Deterministic nodes require a doc kwarg, we don't really\n # need that so if its not supplied, just use the name\n if self.pymc_node is pm.Deterministic and \"doc\" not in kwargs:\n kwargs[\"doc\"] = node_name\n\n node = self.create_node(node_name, kwargs, grouped_data)\n\n if node is not None:\n self.nodes[uniq_elem] = node\n self.append_node_to_db(node, uniq_elem)", "def make_ws_graph(num_nodes, clockwise_neighbours, rewiring_prob):\r\n #initialize empty graph\r\n ws_graph = {}\r\n for vertex in range(num_nodes): ws_graph[vertex] = []\r\n #add each vertex to clockwise neighbours\r\n for vertex in range(num_nodes):\r\n for neighbour in range(vertex + 1, vertex + clockwise_neighbours + 1):\r\n neighbour = neighbour % num_nodes\r\n ws_graph[vertex] += [neighbour]\r\n ws_graph[neighbour] += [vertex]\r\n for vertex in range(num_nodes):\r\n for neighbour in ws_graph[vertex]:\r\n if random.random() < rewiring_prob:\r\n ws_graph[vertex].remove(neighbour)\r\n ws_graph[neighbour].remove(vertex)\r\n randNode = random.randint(0, num_nodes-1)\r\n while(vertex == randNode):\r\n randNode = random.randint(0, num_nodes - 1)\r\n ws_graph[vertex] += [randNode]\r\n ws_graph[randNode] += [vertex]\r\n\r\n\r\n return ws_graph\r\n #rewire each edge with probability rewiring_prob\r\n\r\n #consider each vertex\r\n\r\n #consider each neighbour\r\n\r\n #decide whether to rewire and join to a random node\r\n\r\n #update if necessary\r", "def nodes_from_dict(nd=None,**kwargs):\n\n if not nd:\n err_msg = \"ERROR: No nodes data provided\"\n print(err_msg)\n return 1\n \n nodes = []\n\n ####################\n #Create BUS objects#\n ####################\n busd = {}\n for i, row in nd[\"buses\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n logger.info(\"bus {} will be created\".format(row[\"label\"]))\n bus = solph.Bus(label=row[\"label\"])\n nodes.append(bus)\n busd[row[\"label\"]] = bus\n \n if row[\"excess\"] and not pd.isnull(row[\"excess\"]):\n # Automatically add Sink for curtailment (excess)\n # Add variable cost for excess cost --> minimise curtailment\n nodes.append(\n solph.Sink(\n label=row[\"label\"] + \"_excess\",\n inputs={\n busd[row[\"label\"]]:solph.Flow(\n variable_costs = row[\"excess costs\"]\n )\n },\n )\n )\n # Automatically add Source for shortage\n # Add variable cost for shortage --> minimize shortage\n if row[\"shortage\"] and not pd.isnull(row[\"shortage\"]):\n nodes.append(\n solph.Source(\n label = row[\"label\"] + \"_shortage\",\n outputs={\n busd[row[\"label\"]]:solph.Flow(\n variable_costs=row[\"shortage costs\"]\n )\n },\n )\n )\n ########################\n # Create Source objects#\n ########################\n for i, row in nd[\"commodity_sources\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n 
nodes.append(\n solph.Source(\n label=row[\"label\"],\n outputs={\n busd[row[\"to\"]]: solph.Flow(\n variable_costs = row[\"variable costs\"]\n )\n },\n )\n )\n ########################\n # Create Source objects with fixed time series from 'renewables' table\n ########################\n \"\"\"\n A source can represent a pv-system, a wind power plant, an import of natural gas or a slack variable to avoid creating an in-feasible model.\n While a wind power plant will have an hourly feed-in depending on the weather conditions the natural_gas import might be restricted by \n maximum value (nominal_value) and an annual limit (summed_max). As we do have to pay for imported gas we should set variable costs. \n Comparable to the demand series an fix is used to define a fixed the normalised output of a wind power plant. \n Alternatively, you might use max to allow for easy curtailment. The nominal_value sets the installed capacity.\n \"\"\"\n for i, row in nd[\"renewables\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n # set static outflow values\n outflow_args = {}\n\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == row[\"label\"]:\n outflow_args[col.split(\".\")[1]]=nd[\"timeseries\"][col]\n # outflow_args[\"fix\"]=nd[\"timeseries\"][col]\n \n # TODO add NON-CONVEX to outflow_args\n if row[\"capex\"] and not pd.isnull(row[\"capex\"]):\n # with investment mode, nominal_value must be None\n logger.info(\"Invest {} capacity\".format(row[\"label\"]))\n invest_args = {}\n if not row[\"epc_invest\"] or pd.isnull(row[\"epc_invest\"]):\n epc_invest = economics.annuity(row[\"capex\"],20,0.08)\n else:\n epc_invest=row[\"epc_invest\"]\n invest_args[\"ep_costs\"] = epc_invest\n\n if row[\"max\"] and not pd.isnull(row[\"max\"]):\n invest_args[\"maximum\"] = row[\"max\"]\n\n if row[\"min\"] and not pd.isnull(row[\"min\"]):\n invest_args[\"minimum\"]=row[\"min\"]\n\n if row[\"existing\"] and not pd.isnull(row[\"existing\"]):\n invest_args[\"existing\"]=row[\"existing\"]\n \n outflow_args[\"investment\"] = solph.Investment(**invest_args) \n else: \n outflow_args[\"nominal_value\"] = row[\"capacity\"]\n \n # create\n nodes.append(\n solph.Source(\n label=row[\"label\"],\n outputs = {\n busd[row[\"to\"]]:solph.Flow(**outflow_args)\n }\n )\n )\n #######################\n # Create Sink objects # \n #######################\n \"\"\"\n A sink is normally used to define the demand within an energy model but it can also be used to detect excesses.\n\n The example shows the electricity demand of the electricity_bus defined above.\n - 'nd['timeseries'][col]' should be sequence of normalised values\n - 'nominal_value' is the maximum demand the normalised sequence is multiplied with.\n - Giving 'nd['timeseries'][col]' as parameter 'fix' means that the demand cannot be changed by the solver. 
\n \n In contrast to the 'demand sink' the 'excess sink' has normally less restrictions but is open to take the whole excess.\n \"\"\"\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de[\"active\"]):\n # set static inflow values\n inflow_args = {\n \"nominal_value\":de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0]==de[\"label\"]:\n # inflow_args[col.split(\".\")[1]]=nd[\"timeseries\"][col]\n # TODO: veriry other key than 'fix'?????\n inflow_args[\"fix\"]=nd[\"timeseries\"][col] \n \n # Create Sink object and append to nodes\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]:solph.Flow(**inflow_args)\n }\n )\n )\n #############################\n # Create Transformer object #\n #############################\n \"\"\"\n An instance of the Transformer class can represent a node with multiple input and output flows such as:\n - a power plant\n - a transport line \n - or any kind of a transforming process as electrolysis, a cooling device or a heat pump. \n The efficiency has to be constant within one time step to get a linear transformation.\n You can define a different efficiency for every time step (e.g. the thermal powerplant efficiency according \n to the ambient temperature) but this series has to be predefined and cannot be changed within the optimisation.\n\n A condensing power plant can be defined by a transformer with one input (fuel) and one output (electricity)\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_el = solph.Bus(label='electricity')\n solph.Transformer(\n label=\"pp_gas\",\n inputs={bgas: solph.Flow()},\n outputs={b_el: solph.Flow(nominal_value=10e10)},\n conversion_factors={electricity_bus: 0.58})\n ```\n\n A CHP power plant would be defined in the same manner but with two outputs:\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_el = solph.Bus(label='electricity')\n b_th = solph.Bus(label='heat')\n\n solph.Transformer(\n label='pp_chp',\n inputs={b_gas: Flow()},\n outputs={b_el: Flow(nominal_value=30),\n b_th: Flow(nominal_value=40)},\n conversion_factors={b_el: 0.3, b_th: 0.4})\n ```\n A CHP power plant with 70% coal and 30% natural gas can be defined with two inputs and two outputs:\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_coal = solph.Bus(label='hard_coal')\n b_el = solph.Bus(label='electricity')\n b_th = solph.Bus(label='heat')\n\n solph.Transformer(\n label='pp_chp',\n inputs={b_gas: Flow(), b_coal: Flow()},\n outputs={b_el: Flow(nominal_value=30),\n b_th: Flow(nominal_value=40)},\n conversion_factors={b_el: 0.3, b_th: 0.4,\n b_coal: 0.7, b_gas: 0.3})\n ```\n \"\"\"\n for i, row in nd[\"transformers\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n # set static inflow values\n inflow_args = {\n \"variable_costs\":row[\"variable input costs\"]\n }\n # inflow_args = {}\n outflow_args = {}\n # get time series for inflow transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0]==row[\"label\"]:\n # inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n inflow_args[\"fix\"] = nd[\"timeseries\"][col]\n \n #TODO: multi inputs/outputs and add investment\n\n if row[\"capex inflow\"] and not pd.isnull(row[\"capex inflow\"]):\n logger.info(\"Invest {} inflow capacity\".format(row[\"label\"])) \n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex inflow\"],20,0.08)\n\n if row[\"max inflow\"] and not pd.isnull(row[\"max 
inflow\"]):\n invest_args[\"maximum\"] = row[\"max inflow\"]\n\n if row[\"min inflow\"] and not pd.isnull(row[\"min inflow\"]):\n invest_args[\"minimum\"] = row[\"min inflow\"]\n\n if row[\"existing inflow\"] and not pd.isnull(row[\"existing inflow\"]):\n invest_args[\"existing\"] = row[\"existing inflow\"]\n\n inflow_args[\"investment\"] = solph.Investment(**invest_args)\n else: \n outflow_args[\"nominal_value\"] = row[\"capacity\"] # should be specify capacity inflow or outflow\n\n # create\n nodes.append(\n solph.Transformer(\n label=row[\"label\"],\n inputs = {\n busd[row[\"from\"]]:solph.Flow(**inflow_args)\n },\n outputs={\n busd[row[\"to\"]]:solph.Flow(**outflow_args)\n },\n conversion_factors = {\n busd[row[\"to\"]]:row[\"efficiency\"]\n }\n )\n )\n ##################################\n # Create Transformer CHP objects #\n ##################################\n for i, row in nd[\"transformers_chp\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n\n inflow_args = {}\n outflow_elec_args = {}\n outflow_heat_args = {}\n\n inflow_args[\"variable_costs\"] = row[\"variable input costs\"]\n\n if row[\"capex elec\"] and not pd.isnull(row[\"capex elec\"]):\n logger.info(\"Invest {} inflow capacity\".format(row[\"label\"])) \n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex elec\"],20,0.08)\n if row[\"max elec\"] and not pd.isnull(row[\"max elec\"]):\n invest_args[\"maximum\"] = row[\"max elec\"]\n if row[\"min elec\"] and not pd.isnull(row[\"min elec\"]):\n invest_args[\"minimum\"] = row[\"min elec\"]\n if row[\"existing elec\"] and not pd.isnull(row[\"existing elec\"]):\n invest_args[\"existing\"] = row[\"existing elec\"]\n \n outflow_elec_args[\"investment\"] = solph.Investment(**invest_args)\n investment = solph.Investment(**invest_args)\n else:\n # inflow_args[\"nominal_value\"] = row[\"capacity_el\"]\n outflow_elec_args[\"nominal_value\"] = row[\"capacity_el\"]\n outflow_heat_args[\"nominal_value\"] = row[\"capacity_heat\"]\n\n # Create\n nodes.append(\n solph.Transformer(\n label = row[\"label\"],\n inputs ={\n busd[row[\"from\"]]:solph.Flow(**inflow_args)\n },\n outputs={\n busd[row[\"to_el\"]]:solph.Flow(**outflow_elec_args),\n busd[row[\"to_heat\"]]:solph.Flow(**outflow_heat_args)\n },\n conversion_factors={\n busd[row[\"to_el\"]]:row[\"efficiency_el\"],\n busd[row[\"to_heat\"]]:row[\"efficiency_heat\"]\n }\n )\n )\n\n ##########################\n # Create Storage objects #\n ##########################\n for i, row in nd[\"storages\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n\n inflow_args = {}\n outflow_args = {}\n\n if row[\"capex\"] and not pd.isnull(row[\"capex\"]):\n logger.info(\"Invest {} storage capacity\".format(row[\"label\"]))\n\n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex\"],20,0.08)\n if row[\"max\"] and not pd.isnull(row[\"max\"]):\n invest_args[\"maximum\"] = row[\"max\"]\n if row[\"min\"] and not pd.isnull(row[\"min\"]):\n invest_args[\"minimum\"] = row[\"min\"]\n if row[\"existing\"] and not pd.isnull(row[\"existing\"]):\n invest_args[\"existing\"] = row[\"existing\"]\n\n investment=solph.Investment(\n **invest_args\n )\n nominal_capacity=None\n \n #TODO add if row[\"capex inflow\"] and if row[\"capex outflow\"]\n #TODO read relation_capacity_inflow/outflow from excel\n \n else:\n investment = None\n nominal_capacity = row[\"nominal capacity\"] \n \n if row[\"capacity inflow\"] and row[\"capacity inflow ratio\"]:\n logger.error(\"{} is overdetermined, only 
capacity inflow or capacity inflow ratio shoul be set\".format(row[\"label\"]))\n return 1\n if row[\"capacity inflow\"]:\n inflow_args[\"nominal_value\"] = row[\"capacity inflow\"]\n if row[\"capacity inflow ratio\"]:\n capacity_inflow_ratio = row[\"capacity inflow ratio\"]\n else:\n capacity_inflow_ratio = None\n inflow_args[\"variable_costs\"] = row[\"variable input costs\"]\n\n \n if row[\"capacity outflow\"] and row[\"capacity outflow ratio\"]:\n logger.error(\"{} is overdetermined, only capacity outflow or capacity outflow ratio shoul be set\".format(row[\"label\"]))\n return 1\n if row[\"capacity outflow\"]:\n outflow_args[\"nominal_value\"] = row[\"capacity outflow\"]\n if row[\"capacity outflow ratio\"]:\n capacity_outflow_ratio = row[\"capacity outflow ratio\"]\n else:\n capacity_outflow_ratio = None\n\n outflow_args[\"variable_costs\"] = row[\"variable output costs\"]\n\n nodes.append(\n solph.components.GenericStorage(\n label=row[\"label\"],\n inputs = {\n busd[row[\"bus\"]]:solph.Flow(**inflow_args)\n },\n outputs = {\n busd[row[\"bus\"]]:solph.Flow(**outflow_args)\n },\n investment=investment,\n nominal_storage_capacity=nominal_capacity,\n loss_rate = row[\"capacity loss\"],\n initial_storage_level = row[\"initial capacity\"],\n max_storage_level=row[\"capacity max\"],\n min_storage_level=row[\"capacity min\"],\n invest_relation_input_capacity = capacity_inflow_ratio,\n invest_relation_output_capacity = capacity_outflow_ratio,\n inflow_conversion_factor = row[\"efficiency inflow\"],\n outflow_conversion_factor = row[\"efficiency outflow\"]\n )\n )\n #######################\n # Create Link objects #\n #######################\n \"\"\"\n A Link object with 1...2 inputs and 1...2 outputs\n Note: This component is experimental. Use it with care\n \"\"\"\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"] and not pd.isnull(p[\"active\"]):\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label = \"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs = {\n bus1:solph.Flow(),\n bus2:solph.Flow()\n },\n outputs = {\n bus1: solph.Flow(nominal_value = p[\"capacity\"]),\n bus2: solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1,bus2):p[\"efficiency\"],\n (bus2,bus1):p[\"efficiency\"]\n }\n )\n ) \n return nodes", "def __build_graph(self):\n all_matches = self.___matches()\n # make new matrix of the student classes and their general studies matrix\n split_reqs = [reqs.split(', ') for reqs in all_matches['ALL'].as_matrix() ]\n rep_values = [line.count(\",\") + 1 for line in all_matches['ALL']]\n CLS = np.repeat(all_matches['FULL'].as_matrix(), rep_values )\n REQ = np.array(list(chain.from_iterable(split_reqs)))\n graph = pd.DataFrame([CLS, REQ]).T\n graph.columns = ['CLS','REQ']\n graph = graph.drop_duplicates()\n return graph", "def __init__(self, no_vertices=0):\r\n self.__neighbours = {}\r\n self.__cost = {}\r\n for i in range(no_vertices):\r\n self.__neighbours[i] = []", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._passage_rank()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def _build_graph(self):\n start_t = time.time()\n 
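A hedged sketch of how the node list returned above is typically wired into an oemof.solph optimisation. The nd scenario tables and an installed MILP solver such as CBC are assumed; this mirrors common oemof.solph 0.4-style usage rather than any script shown here.

from oemof import solph

# Hedged usage sketch, assuming `nd` holds the scenario tables used above.
es = solph.EnergySystem(timeindex=nd["timeseries"].index)
es.add(*nodes_from_dict(nd=nd))

model = solph.Model(es)
model.solve(solver="cbc", solve_kwargs={"tee": False})
results = solph.processing.results(model)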
self.load_data()\n self.get_train_data()\n self.plot_data()\n self._setup_placeholders()\n self.lstm()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def add_communites(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n clusters = IGraph.community_walktrap(ig, weights=\"weight\").as_clustering()\n\n nodes = [{\"name\": node[\"name\"]} for node in ig.vs]\n for node in nodes:\n idx = ig.vs.find(name=node[\"name\"]).index\n node[\"community\"] = clusters.membership[idx]\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.community = toInt(n.community)\n '''\n\n self.graph.run(write_clusters_query, nodes=nodes)", "def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1", "def add_graph(self):\n \n self.cd_sampling = None\n \n if \"CD\" in self.algorithm:\n\n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples()\n \n if \"CSS\" in self.algorithm and self.mf_steps > 0: \n \n self.add_mf_updates()\n \n elif \"CSS\" in self.algorithm and self.gibbs_steps > 0:\n \n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples() \n \n 
self.add_objective()\n\n self.add_grad_updates() \n \n if self.report_p_tilda:\n \n self.add_p_tilda()\n \n self.add_pseudo_cost_measure()\n\n self.optimize = self.optimization_step()", "def make_graph(VERTICES, CLUSTERS, DENSITY):\n CLUSTER_SIZE = VERTICES / CLUSTERS\n clusters = []\n\n for i in xrange(CLUSTERS):\n if i == CLUSTERS - 1:\n vertex_set = range(i * CLUSTER_SIZE, VERTICES)\n else:\n vertex_set = range(i * CLUSTER_SIZE, (i + 1) * CLUSTER_SIZE)\n\n cluster = make_connected(vertex_set, DENSITY * CLUSTER_SIZE)\n clusters += [cluster]\n\n graph = joinClusters(clusters)\n return graph", "def algorithm(df, params):\n\n output = {}\n\n # algorithm specific code\n # construct network analysis\n NW = Network(df, params['relations'])\n output['d3js'] = NW.export_json()\n output['gephi'] = NW.export_gephi()\n output['pajek'] = NW.export_pajek()\n output['assortativity'] = NW.assortativity()\n output['node_attributes'] = NW.node_attributes()\n output['edge_attributes'] = NW.edge_attributes()\n output['strong_components'] = NW.strong_components()\n output['weak_components'] = NW.weak_components()\n output['triads'] = NW.triads()\n\n # plot network\n pruned_network = NW.prune_network()\n output['div'] = plot.plot_network(pruned_network, params['layout'],\n params['relations'],\n title=params['relations']\n + ' Network graph of 500 nodes with highest degree centrality')\n\n return output", "def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass", "def _setup_graph(self):\n pass", "def _setup_graph(self):\n pass", "def build_graph(self):\n assert self.n_features is not None, 'Number of features is unknown. 
It can be set explicitly by .core.set_num_features'\n self.graph = tf.Graph()\n self.graph.seed = self.seed\n with self.graph.as_default():\n with tf.name_scope('learnable_params') as scope:\n self.init_learnable_params()\n with tf.name_scope('input_block') as scope:\n self.init_placeholders()\n with tf.name_scope(\"cosine_similarity\"):\n self.init_similarity_computation()\n with tf.name_scope('main_block') as scope:\n self.init_main_block()\n with tf.name_scope('optimization_criterion') as scope:\n self.init_regularization()\n self.init_loss()\n self.init_target()\n self.trainer = self.optimizer.minimize(self.target)\n self.init_all_vars = tf.global_variables_initializer()\n self.summary_op = tf.summary.merge_all()\n self.saver = tf.train.Saver()", "def create_cost_unitary(graph, gamma):\n\n cost_unitary = QuantumCircuit(len(graph.nodes), name=\"Cost Unitary\")\n weights = nx.get_edge_attributes(graph, 'weight').values() # Get weights from graph\n\n # Add corresponding gates for each edge\n for edge, weight in zip(graph.edges, weights):\n cost_unitary.cx(int(edge[0]), int(edge[1]))\n cost_unitary.rz(2*gamma*weight, int(edge[1]))\n cost_unitary.cx(int(edge[0]), int(edge[1]))\n cost_unitary.barrier() # Visually the unitary for each edge\n #cost_unitary.to_gate()\n return cost_unitary", "def graph_course(self):\n group = self.__data[\"filted_general_groupby\"]\n graph = {}\n if self.analysis[\"courses\"] is None:\n self.courses_list()\n\n # inicializa o dicionario que vai guardar o grafico\n for course in self.analysis[\"courses\"].index:\n graph[course] = []\n\n for i in range(18):\n min_v = i * 5\n max_v = min_v + 4.99\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n min_v = 95\n max_v = 100\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n self.analysis[\"graph_course\"] = graph", "def build_graph(self):\n start_time = time.time()\n\n # init temp node\n for value in self.domain:\n node = TempDepNode(value)\n self.nodes[value] = node\n\n attr_data = self.graph_data[self.attr_name]\n print(f'{len(attr_data)} records in data')\n\n # init temp edge\n for source_ix, value_i in tqdm(attr_data.items()):\n visited = set()\n for target_ix, value_j in attr_data[source_ix+1:].items():\n if value_j in visited:\n continue\n else:\n visited.add(value_j)\n time_diff = self.graph_data[self.time][target_ix] - \\\n self.graph_data[self.time][source_ix]\n if time_diff > self.time_diff_threshold:\n break\n if (value_i, value_j) not in self.edges or (value_j, value_i) not in self.edges:\n self.edges[(value_i, value_j)] = TempDepEdge(value_i, value_j)\n self.edges[(value_j, value_i)] = TempDepEdge(value_j, value_i)\n self.edges[(value_i, value_j)].add_event(time_diff)\n if value_i != value_j:\n self.edges[(value_j, value_i)].add_event(time_diff)\n end_time = time.time()\n print(f'{end_time-start_time} seconds for graph building')", "def calc_assn_weights():\n\t\n\t\t\t#\n\t\t\t#\n\ttext(\"\"\"INSERT INTO assignments (mentor_id, course_id, cost)\n\t\t\tSELECT M.mentor_id, C.course_id, SUM(COALESCE(PW.weight_value,PT.def_weight_val))\n\t\t\tFROM mentors M, courses C\n\t\t\tJOIN course2pref C2P ON C2P.course_id = C.course_id\n\t\t\tJOIN prefs P ON P.pref_id = C2P.pref_id\n\t\t\tJOIN pref_types PT ON PT.pref_type_id = P.pref_type_id\n\t\t\tJOIN pref_weights PW ON PW.pref_type_id = P.pref_type_id\n\t\t\tLEFT JOIN choices Ch ON Ch.mentor_id = M.mentor_id AND Ch.weight_id = PW.pref_id", "def make_graph(self, list_of_paths, view_progress=False):\n\n for i, path in enumerate(list_of_paths):\n\n # No 
constraints required for first path\n if i == 0:\n self.add_all_edges(path)\n\n else:\n constraint = {}\n\n for (source, dest) in zip(path, path[1:]): \n\n # Add edge to graph if it is new\n if not self.has_edge(source, dest):\n self.add_edge(source, dest)\n\n # Check if source node is a \"branching\" node\n # i.e. has more than one destination node \n if len(self.G[source]) >= 2: \n\n # If path constraints exist, add to edge\n if len(constraint) >= 1:\n\n self.update_edge_constraint(source, dest, constraint)\n self.update_old_branches(source, constraint)\n\n # Add this edge to path constraints\n constraint.update({source: {dest}})\n\n # Once path fully added, update backup copy of Graph\n self.G_backup = copy.deepcopy(self.G)\n\n # Show progress after each new path is added\n if view_progress:\n print('\\nCurrent graph is: ')\n pprint(self.G)\n input(\"Press Enter to continue.\\n\")", "def finalize_graph(self) -> None:\n digraph = nx.MultiDiGraph()\n\n for node in self.graph.iternodes():\n attributes = self.get_attributes(node)\n attributes[\"schema\"] = node.type.name\n if node.caption is not None:\n attributes[\"label\"] = node.caption\n if node.is_entity and node.schema is not None:\n attributes[\"schema\"] = node.schema.name\n digraph.add_node(node.id, **attributes)\n\n for edge in self.graph.iteredges():\n attributes = self.get_attributes(edge)\n attributes[\"schema\"] = edge.type_name\n attributes[\"weight\"] = str(edge.weight)\n digraph.add_edge(edge.source_id, edge.target_id, key=edge.id, **attributes)\n\n for line in generate_gexf(digraph, prettyprint=True):\n self.fh.write(line)\n self.fh.write(\"\\n\")", "def build_triangle_graph_with_costs(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_node()\n\n graph.new_edge(1, 2, 1)\n graph.new_edge(2, 3, 2)\n graph.new_edge(3, 1, 10)\n\n return graph", "def create_basic_adjacency_map_3():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\", \"Y\"],\n \"E\": [\"X\"],\n \"X\": [\"Z\"],\n \"Y\": [\"Z\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def __init__(self, n=1):\n vertices = [Vertex(i) for i in range(n)]\n for vertex in vertices:\n self.add_vertex(vertex)\n self.populate_graph()", "def main():\n\n #Courses\n years = [2016, 2017, 2018, 2019, 2020]\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE \"course\" (\n \"course_number\"\tint NOT NULL,\n \"dept_id\"\tvarchar(4) NOT NULL,\n \"title\"\tvarchar(100) NOT NULL,\n \"instructor_fname\"\tvarchar(35) DEFAULT NULL,\n \"instructor_lname\"\tvarchar(35) DEFAULT NULL,\n \"student_work_products\"\tjson DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n PRIMARY KEY(\"course_number\", \"term\", \"year\")) \n \"\"\"\n )\n conn.commit()\n courses = [\n (1370, \"CPSC\", \"Computer Literacy\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (1375, \"CPSC\", \"Programming I\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2376, \"CPSC\", \"Intro to Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2380, \"CPSC\", \"Algorithms\", names.get_first_name(), names.get_last_name(), 
json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2482, \"CPSC\", \"Computer Organization\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3377, \"CPSC\", \"Advanced Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3380, \"CPSC\", \"Operating Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3383, \"CPSC\", \"Programming Languages\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3384, \"CPSC\", \"Computer Networks\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\"),\n (4360, \"CPSC\", \"Computer Security\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\")\n ]\n #Adding years\n upload_courses = []\n for year in years:\n upload_courses += [x + (year,) for x in courses]\n #Making a few instructors teach multiple course\n new_courses = [\n (4557, \"CPSC\", \"Natural Language Processing\", ),\n (2375, \"CPSC\", \"Programming II\",),\n (2776, \"CPSC\", \"Data Structures and Algorithms\",),\n (4862, \"CPSC\", \"Image Recognition\", ),\n ]\n for i in range(0,len(new_courses)):\n year = choice(years)\n for y in range(0,2): #Number of times new course is taught\n c = upload_courses[i]\n new_data = (c[3], c[4], c[5], choice([\"Fall\", \"Spring\", \"Summer\"]), year+y)\n data = new_courses[i] + new_data\n upload_courses.append(data)\n #Adding solo instructors and solo courses\n upload_courses += [\n (4672, \"CPSC\", \"Programming Memes\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\", choice(years)),\n (1872, \"CPSC\", \"Information Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\", choice(years)),\n (1123, \"CPSC\", \"Microsoft Office\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\", choice(years))\n ]\n\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.executemany('''INSERT INTO course (course_number, dept_id, title, instructor_fname, instructor_lname, student_work_products, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?)''', upload_courses)\n conn.commit()\n\n #SWP\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE `student_work_product` (\n `id` INTEGER PRIMARY KEY,\n `product` varchar(250) NOT NULL,\n `course_id` int NOT NULL,\n `dept_id` int NOT NULL,\n `student_fname` varchar(35) NOT NULL,\n `student_lname` varchar(35) NOT NULL,\n `student_outcome` int DEFAULT NULL,\n `score` int DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n CONSTRAINT `course` FOREIGN KEY (`course_id`) REFERENCES `course` (`course_number`)\n CONSTRAINT `course` FOREIGN KEY (`dept_id`) REFERENCES `course` (`dept_id`)\n )\n \"\"\"\n )\n conn.commit()\n \n swps = []\n with sqlite3.connect(\"determined.db\") as conn:\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute (\"Select * from course\")\n records = [dict(x) for x in 
c.fetchall()]\n #Generating 20 student records for each swp in each course\n for i, course in enumerate(records):\n student_names = []\n for _ in range(20):\n student_names.append({'fname': names.get_first_name(),\n 'lname': names.get_last_name()})\n for product in json.loads(course['student_work_products'])['swp']:\n for student in student_names:\n if i%7 == 0:\n score = int(triangular(50, 85))\n else:\n score = int(triangular(50, 100))\n if score >= 90: outcome = 4\n elif score >= 80: outcome = 3\n elif score >= 70: outcome = 2\n elif score >= 60: outcome = 1\n else: outcome = 0 \n swps.append((\n product,\n course['course_number'],\n \"CPSC\",\n student['fname'],\n student['lname'],\n outcome,\n score, \n course['term'], \n course['year']\n ))\n \n c.executemany('''INSERT INTO student_work_product (product, course_id, dept_id, student_fname, student_lname, student_outcome, score, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)''', swps)\n conn.commit()", "def add_course(graph, course, parent, color):\n if course[-1] == '*':\n # Concurrent course\n color = 'green'\n course = course[:-1]\n child = pydot.Node(course)\n graph.add_node(child)\n edge = pydot.Edge(parent, course, color=color)\n graph.add_edge(edge)", "def __init__(self):\n self.tree = nx.Graph() \n self.orig_dist_matrix = pd.DataFrame()\n self.work_dist_matrix = pd.DataFrame() \n self.cluster_map = {} \n self.class_map = {}", "def rebuild_graph_data(self, consolidator=None):\n\n if len(self.nodes) == 0:\n raise DomainException(\"No nodes supplied to graph!\")\n\n if consolidator != None:\n for node in self.nodes.values():\n na = set()\n nat = set()\n no = set()\n nr = set()\n ni = set()\n nir = set()\n nrc = Counter()\n for atype, attribute in node.attributes:\n try:\n atype = consolidator(atype)\n except ConsolidatorException:\n continue\n na.add((atype, attribute))\n nat.add(atype)\n\n for rtype, dest in node.outgoing_relations:\n try:\n rtype = consolidator(rtype)\n except ConsolidatorException:\n continue\n no.add((rtype, dest))\n nr.add(rtype)\n nrc[rtype] += 1\n \n for rtype, pred in node.incoming_relations:\n try:\n rtype = consolidator(rtype)\n except ConsolidatorException:\n continue\n ni.add((rtype, pred))\n nir.add(rtype)\n nrc[rtype] += 1\n\n #update values\n node.attributes = na\n node.outgoing_relations = no\n node.incoming_relations = ni\n node.rtypes = nr\n node.i_rtypes = nir\n node.atypes = nat\n node.rtype_count = nrc\n\n # ==== compute member variables ====\n self.usage_map = self.map_uses()\n self.usage_counts = {x:len(y) for x,y in self.usage_map.items()}\n self.rtype_vectors = self.index_rtypes()\n self.node_vectors = self.index_nodes()\n self.rkdtree_keys, _rvalues = zip(*self.rtype_vectors.items())\n self.rkdtree = cKDTree(_rvalues)\n self.nkdtree_keys, _nvalues = zip(*self.node_vectors.items())\n self.nkdtree = cKDTree(_nvalues)\n\n # ==== precompute some vector constructs ====\n for node in self.nodes.values():\n node.compute_dicts(self)\n\n # ==== compute tf-idf weights for all nodes ====\n\n #calculate number of nodes containing rtype and \n #find maximum frequency rtype for any single node\n maxftd = 0\n c2 = Counter()\n for y in self.nodes.values():\n for k,z in y.rtype_count.items():\n c2[k] += 1\n if z > maxftd:\n maxftd = z\n\n #calculate augmented term frequency\n tf = Counter()\n for x,y in self.nodes.items():\n for z,v in y.rtype_count.items():\n tf[(x,z)] = 0.5 + 0.5*(v/maxftd)\n\n #calculate inverse document frequency\n idf = Counter()\n N = len(self.nodes)\n for x in c2:\n idf[x] = 
log(N / c2[x])\n\n tfidf = {}\n for x,y in self.nodes.items():\n for z in y.rtype_count:\n tmp = tfidf.setdefault(x,{})\n tmp[z] = tf[(x,z)] * idf[z]\n\n self.tfidf = tfidf\n self.dirty = False" ]
[ "0.610946", "0.6068693", "0.5970327", "0.59139115", "0.5910554", "0.59065074", "0.5842171", "0.58402723", "0.5831642", "0.5821705", "0.58180076", "0.5813589", "0.5796841", "0.57905227", "0.5715409", "0.5708784", "0.56940794", "0.5655929", "0.5635845", "0.5629975", "0.56149757", "0.56129605", "0.56017387", "0.5577498", "0.5569718", "0.55530834", "0.5528388", "0.55136675", "0.55126846", "0.5476829", "0.5466921", "0.54540855", "0.54415584", "0.5439747", "0.5427362", "0.54155636", "0.5415547", "0.5387961", "0.5387961", "0.5382741", "0.53613484", "0.5361319", "0.5342517", "0.53360564", "0.5328969", "0.53162646", "0.53122735", "0.5310568", "0.53105396", "0.53101444", "0.5307735", "0.52963364", "0.5295824", "0.528474", "0.5276116", "0.5274247", "0.52721673", "0.52719235", "0.52716106", "0.52664405", "0.5266254", "0.52609843", "0.52586496", "0.5255319", "0.52550554", "0.5254774", "0.52513593", "0.5241077", "0.52409804", "0.5234664", "0.52314997", "0.52252614", "0.5221183", "0.52191514", "0.52132744", "0.5202034", "0.5191866", "0.5187881", "0.51840764", "0.51773405", "0.51760864", "0.517214", "0.51716083", "0.51713276", "0.51708794", "0.51708794", "0.51692545", "0.5166241", "0.516507", "0.516164", "0.5161423", "0.51605487", "0.5158073", "0.515681", "0.5155019", "0.51527536", "0.5148819", "0.5130326", "0.51274127", "0.5126797" ]
0.7083467
0
Gets the crosslistings of the top edges from a course
def getTopEdgesFrom(self, session, courseid):
    node = self.getNode(courseid) # get CourseNode
    if not node:
        return []
    edges = node.getEdges() # get its Edge dict
    return sorted(edges.keys(), key=lambda k: edges[k], reverse=True)[:5]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assembly_courses(wall):\n courses = []\n vertices = set(wall.nodes())\n base = set(wall.nodes_where({'is_support': True}))\n\n if base:\n courses.append(list(base))\n\n seen = set()\n seen.update(base)\n\n vertices -= base\n\n while vertices:\n nbrs = set(nbr for key in courses[-1] for nbr in wall.neighbors(key))\n course = list(nbrs - seen)\n courses.append(course)\n seen.update(nbrs)\n vertices -= nbrs\n\n return courses", "def GetTopConfidenceEdges(g, dia, topn=20):\r\n edgez = {(e[0], e[1]): e[2]['z'] for e in g.edges(data=True)}\r\n edgeconf = {(e[0], e[1]): e[2]['frac_minority'] for e in g.edges(data=True)}\r\n edgenum = {(e[0], e[1]): e[2]['num_patients'] for e in g.edges(data=True)}\r\n edgez_sorted = sorted(edgez.items(), key=operator.itemgetter(1), reverse=True)[:topn]\r\n newedgez_sorted = []\r\n for e in edgez_sorted:\r\n e = list(e)\r\n edge = e[0]\r\n e.append(edgenum[e[0]])\r\n e.append(edgeconf[e[0]])\r\n newedgez_sorted.append(e)\r\n PrintCodeDescr(g, dia, newedgez_sorted, mode=\"edge\")", "def getCrossWithExtraEdgeInBetweenGraph(self):\n graph = self.graph\n makeLayer = self.makeLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n addNodesToLayer = self.addNodesToLayer\n\n leftLayer = makeLayer()\n rightLayer = makeLayer()\n\n leftNodes = addNodesToLayer(3, leftLayer)\n rightNodes = addNodesToLayer(3, rightLayer)\n\n eastWestEdgeFromTo(leftNodes[0], rightNodes[2])\n eastWestEdgeFromTo(leftNodes[1], rightNodes[1])\n eastWestEdgeFromTo(leftNodes[2], rightNodes[0])\n\n return graph", "def decreasing_cover_relations(self):\n relations = []\n for i in range(self.size(), 1, -1):\n for j in range(i - 1, 0, -1):\n if self.le(i, j):\n relations.append((i, j))\n break\n return relations", "def add_edges(X, c=1):\n n = X.shape[0]\n edges = []\n for i in range(n):\n for j in range(i, n):\n if minkowski_dist(X[i], X[j], c) < 0:\n if X[i][0] < X[j][0]:\n edges.append((i, j))\n else:\n edges.append((j, i))\n return edges", "def cc_visited(ugraph):\r\n\tremaining_node = ugraph.keys()\t\t#The keys are accessible directly.\r\n\t\r\n\tcon_com = [] #connected component\r\n\twhile len(remaining_node) != 0 :\r\n\t\tnode = random.choice(remaining_node)\r\n\t\tvisited = bfs_visited(ugraph,node)\r\n\t\tcon_com.append(visited)\r\n\t\tfor item in visited:\r\n\t\t\tremaining_node.remove(item)\r\n\treturn con_com", "def top_face_edges(faces):\r\n top_edges = list({e for f in faces for e in f.edges})\r\n return sorted(top_edges, key=lambda e: calc_edge_median(e).z, reverse=True)[: len(faces)]", "def increasing_cover_relations(self):\n relations = []\n size = self.size()\n for i in range(1, size):\n for j in range(i + 1, size + 1):\n if self.le(i, j):\n relations.append((i, j))\n break\n return relations", "def get_neighbour_edges(self, cur: Union[str, int]) -> list:\n\t\treturn [edge for edge in self.edges if cur in edge]", "def top_k_betweenness_centrality(self):\n d={}\n l=[]\n for v in vertices:\n a=self.betweenness_centrality(v)\n d[v]=a\n l.append(a)\n m=max(l)\n l1=[]\n for key in d:\n if d[key]==m:\n l1.append(key)\n\n return l1", "def getRequiredCrossNodes(self, nodes: List[int], identifier: int) -> List[Tuple[int, int, int]]:\n\n return []", "def edge_filtering(S, edge_list, co=0, type_='out'):\n edges = edge_list[:]\n for a in S:\n S_sort = sorted(S[a], key=S[a].get, reverse=True)\n for i in range(len(S[a])):\n b = S_sort[i]\n if (S[a][b] >= co) | (i == 0):\n if type_ != 'out':\n if (b,a) not in edges: edges.append((b,a))\n else:\n if (a,b) not in edges: edges.append((a,b))\n else: 
break\n return edges", "def strongly_connected_components(G):\n preorder={}\n lowlink={} \n scc_found={}\n scc_queue = []\n scc_list=[]\n i=0 # Preorder counter\n for source in G:\n if source not in scc_found:\n queue=[source]\n while queue:\n v=queue[-1]\n if v not in preorder:\n i=i+1\n preorder[v]=i\n done=1\n v_nbrs=G[v]\n for w in v_nbrs:\n if w not in preorder:\n queue.append(w)\n done=0\n break\n if done==1:\n lowlink[v]=preorder[v]\n for w in v_nbrs:\n if w not in scc_found:\n if preorder[w]>preorder[v]:\n lowlink[v]=min([lowlink[v],lowlink[w]])\n else:\n lowlink[v]=min([lowlink[v],preorder[w]])\n queue.pop()\n if lowlink[v]==preorder[v]:\n scc_found[v]=True\n scc=[v]\n while scc_queue and preorder[scc_queue[-1]]>preorder[v]:\n k=scc_queue.pop()\n scc_found[k]=True\n scc.append(k)\n scc_list.append(scc)\n else:\n scc_queue.append(v)\n scc_list.sort(key=len,reverse=True) \n return scc_list", "def generate_top20_candidates(cosine_sim):\n top20_indices = cosine_sim[0].argsort()[:-21:-1]\n top20_cosine = [cosine_sim[0][i] for i in top20_indices]\n return top20_indices, top20_cosine", "def c_edges(self):\n self.compute_c_edges(self)\n return self._c_edges", "def strongly_connected_component_subgraphs(G):\n cc=strongly_connected_components(G)\n graph_list=[]\n for c in cc:\n graph_list.append(G.subgraph(c))\n return graph_list", "def cc_visited(ugraph):\n \n remaining = set(ugraph.keys())\n ccomp = []\n while len(remaining) > 0:\n node = remaining.pop()\n visited = bfs_visited(ugraph,node)\n ccomp.append(visited)\n remaining.difference_update(visited)\n \n return ccomp", "def findcc(self):\n for vertex in self.graph.vertices:\n if vertex not in self.preOrderNumbers:\n self.dfs(vertex)\n return self.scComponents", "def top(self):\n # if not empty\n if not self.empty():\n return self._data[0].get_value()[1].course\n # if empty\n return None", "def edges(self):\n return map(Edge, self._top_exp.edges())", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def get_courses(std):\n return std[2]", "def edges(self):\n top_exp = TopologyUtils.TopologyExplorer(self.topods_shape(), ignore_orientation=True)\n return map(Edge, top_exp.edges())", "def cc_visited(ugraph):\n remain = set(ugraph.keys())\n conn_comp = []\n while remain:\n node = remain.pop()\n visited = bfs_visited(ugraph, node)\n conn_comp.append(visited)\n remain = remain.difference(visited)\n return conn_comp", "def get_crossing(self, threshold, start=None, stop=None, n=1, edge='both'):\n xval_list = self.get_all_crossings(threshold, start=start, stop=stop, edge=edge)\n if len(xval_list) < n:\n return None\n return xval_list[n - 1]", "def eligible_edges(self):\n if len(self.edges) == 4:\n return [self.edges[0], self.edges[2]]\n return []", "def cc_visited(ugraph):\n\tremain = []\n\tfor node in ugraph:\n\t\tremain.append(node)\n\tconnected = []\n\twhile remain:\n\t\tvisited = bfs_visited(ugraph, remain[0])\n\t\tconnected.append(visited)\n\t\tremain = [i for i in remain if not i in visited]\n\treturn connected", "def nodesBetweenCriticalPoints(self, head: Optional[ListNode]) -> List[int]:\n critical = []\n pos = 1\n while head is not None and head.next is not None and head.next.next is not None:\n if head.val < head.next.val > head.next.next.val or head.val > head.next.val < head.next.next.val:\n critical.append(pos)\n head = head.next\n pos += 1\n if len(critical) <= 1:\n return [-1, -1]\n ret_min = pos\n ret_max = critical[-1] - critical[0]\n for i in range(1, len(critical)):\n ret = critical[i] - 
critical[i - 1]\n if ret < ret_min:\n ret_min = ret\n return [ret_min, ret_max]", "def strongly_connected_components(G):\n preorder={}\n lowlink={}\n scc_found={}\n scc_queue = []\n i=0 # Preorder counter\n for source in G:\n if source not in scc_found:\n queue=[source]\n while queue:\n v=queue[-1]\n if v not in preorder:\n i=i+1\n preorder[v]=i\n done=1\n v_nbrs=G[v]\n for w in v_nbrs:\n if w not in preorder:\n queue.append(w)\n done=0\n break\n if done==1:\n lowlink[v]=preorder[v]\n for w in v_nbrs:\n if w not in scc_found:\n if preorder[w]>preorder[v]:\n lowlink[v]=min([lowlink[v],lowlink[w]])\n else:\n lowlink[v]=min([lowlink[v],preorder[w]])\n queue.pop()\n if lowlink[v]==preorder[v]:\n scc_found[v]=True\n scc=[v]\n while scc_queue and preorder[scc_queue[-1]]>preorder[v]:\n k=scc_queue.pop()\n scc_found[k]=True\n scc.append(k)\n yield scc\n else:\n scc_queue.append(v)", "def get_top_corners(corners):\n top_corners = np.concatenate(\n [sorted(rect, key=getY)[:2] for rect in corners])\n return sorted(top_corners, key=getX)", "def get_edges(self):\n try:\n temp = self.edges\n except:\n temp = []\n return temp", "def get_bipartition(g):\n # Write your code here.\n colorArr = [-1] * (len(g.nodes()) + 1)\n for node in g.nodes():\n start = g.neighbors(node)\n if len(start)>0:\n src = start.pop()\n break\n colorArr[src] = 1\n queue = []\n queue.append(src)\n while (queue):\n u = queue.pop()\n for v in g.nodes():\n if g.has_edge(u, v) and colorArr[v] == -1:\n colorArr[v] = 1 - colorArr[u]\n queue.append(v)\n elif g.has_edge(u, v) and colorArr[u] == colorArr[v]:\n return None\n\n red = set()\n for i in range(1, len(colorArr)):\n if colorArr[i] == 1:\n red.add(i)\n return list(red)\n\n\n\n # Hint! If you'd like to test out these commands without\n # writing a full-fledged program, you might want to familiarise\n # yourself with the Python interactive shell or IPython (available\n # on at least some Aalto IT computers)\n\n # Create a simple line graph g: \"(1)->(2)->(3)\"\n # (The creation parameter is a dict of {node: list_of_neighbors},\n # but this is not something you will be needing in your code.)\n # >>> from networkx import Graph \n # >>> g = Graph({1: [2], 2: [3]})\n # >>> g.number_of_nodes()\n # 3\n\n # Example. Iterate over the nodes and mark them as visited\n # >>> visited = set()\n # >>> for node in g.nodes_iter(): # There is also g.nodes(), which returns a list\n # ... # do some work here\n # ... visited.add(node)\n \n # Example. Given a Node v, get all nodes s.t. there is an edge between\n # v and that node\n # >>> g.neighbors(1)\n # [2]\n\n # Example. 
Get the edges of the graph:\n # >>> e.edges() # as with nodes, there is also g.edges_iter()\n # [(1, 2), (2, 3)]\n\n # For more information, consult the NetworkX documentation:\n # https://networkx.github.io/documentation/networkx-1.10/tutorial/tutorial.html", "def top(self):\r\n return self.topele", "def get_dropoff_vertices_efficient(G, clusters, all_pairs_distances):\n best_dropoffs = []\n for key in clusters:\n dropoff = clustering_routines.best_dropoff_efficient(G,clusters[key],all_pairs_distances)\n best_dropoffs.append(dropoff)\n return best_dropoffs", "def print_top_n_graphs(C, n):\n ps = sorted(C.P, key=itemgetter(2), reverse=True)\n for i in range(n):\n if i >= len(ps):\n break\n p, c, s = ps[i]\n print(p)\n print(\"Appeared %d times\" % c)", "def get_tri_list(top_tri):\n\ttri_nums = [1]\n\tval = 1\n\twhile tri_nums[-1] < top_tri:\n\t\ttri_val = int(.5*(val*(val+1)))\n\t\ttri_nums.append(tri_val)\n\t\tval += 1\n\treturn tri_nums", "def calc_neck_score(self):\n ordered_c, edgelen = self.coords, self.edgelen\n nc = np.vstack((ordered_c, ordered_c[:edgelen, :]))\n score = []\n for n, ci in enumerate(nc[:-edgelen, :]):\n try:\n score.append(calc_clockwise_degree(ordered_c[n-edgelen, :], nc[n, :], nc[n+edgelen, :]))\n except:\n pass\n idx = np.flipud(np.argsort(score))\n return np.array(score)[idx], ordered_c[idx]", "def _toposort(edges):\r\n incoming_edges = reverse_dict(edges)\r\n incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())\r\n S = set((v for v in edges if v not in incoming_edges))\r\n L = []\r\n\r\n while S:\r\n n = S.pop()\r\n L.append(n)\r\n for m in edges.get(n, ()):\r\n assert n in incoming_edges[m]\r\n incoming_edges[m].remove(n)\r\n if not incoming_edges[m]:\r\n S.add(m)\r\n if any(incoming_edges.get(v, None) for v in edges):\r\n raise ValueError(\"Input has cycles\")\r\n return L", "def topSort(self, x=None, seen=None, order=deque([]), cycle=False):\n\n if x is None:\n for x in self.getVertices(): # choose an arbitrary vertex\n break\n\n if seen is None:\n seen = {vertex: False for vertex in self.getVertices()}\n\n seen[x] = True\n\n for y, weight in self.outboundEdges(x):\n if seen[y]:\n cycle = True\n return False\n\n order = self.topSort(y, seen, order, cycle)\n\n if order == False:\n cycle = True\n return False\n\n\n order.appendleft(x)\n return order\n\n # print(\"%i \" % x, end='')", "def find_edges(self):\n self.edges = [deepcopy(self.grid[0]), [], deepcopy(self.grid[-1]), []]\n for g in self.grid:\n self.edges[3].append(g[0])\n self.edges[1].append(g[-1])\n self.edges[2]\n self.edges[3]", "def get_graph_breakdown(clustered):\n edges, output = [], []\n combs = itertools.combinations(clustered, 2)\n for i in combs:\n edge1 = i[0][0]\n edge2 = i[1][0]\n dist = haversine_distance(i[0][5], i[0][4],\n i[1][5], i[1][4])\n\n if dist < .8:\n edges.append((edge1, edge2, dist))\n\n G = nx.DiGraph()\n G.add_weighted_edges_from(edges)\n degcent = nx.degree_centrality(G).items()\n\n for i in clustered:\n for j in degcent:\n if i[0] == j[0]:\n output.append([i[0], i[1], i[2],\n i[3], i[4], i[5], i[6], j[1]])\n\n return output", "def pass_through(head_list_at_crossing):\n pass_head_list_at_crossing = []\n pass_head_list_at_crossing.append(head_list_at_crossing[1])\n pass_head_list_at_crossing.append(head_list_at_crossing[0])\n return pass_head_list_at_crossing", "def get_subgraph(graph, min_degree):\n ###TODO\n pass", "def strongly_connected_components_subgraphs(self):\n return [self.subgraph(_) for _ in self.strongly_connected_components()]", "def 
get_top_news_and_the_rest(self):\n queryset = self.news.order_by('-marked', '-publication_date')\n return queryset.first(), queryset[1:]", "def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs", "def connected_components(G):\n\n # start with empty list of components\n C = []\n visited = set()\n\n for v in G.get_vertices():\n if v not in visited:\n span = spanning_edges(G, v)\n component = set()\n\n for x in span:\n for y in x:\n component.add(y)\n visited.add(y)\n\n C.append(component)\n\n # sort the components list by the min on each element\n C.sort(key=min)\n return C", "def testGetCrossConnects(self):\n\n self.oxc.get_crossconnects(file_name = 'get_crossconnects.xml')", "def filter_edges(self,edges):\n return list(filter(lambda x: x[0]<x[1],edges))", "def getFixedPortOrderInLayerEdgesWithCrossings(self):\n graph = self.graph\n makeLayer = self.makeLayer\n addNodesToLayer = self.addNodesToLayer\n addInLayerEdge = self.addInLayerEdge\n setFixedOrderConstraint = self.setFixedOrderConstraint\n\n layer = makeLayer(graph)\n nodes = addNodesToLayer(2, layer)\n setFixedOrderConstraint(nodes[0])\n setFixedOrderConstraint(nodes[1])\n addInLayerEdge(nodes[0], nodes[1], PortSide.EAST)\n addInLayerEdge(nodes[0], nodes[1], PortSide.EAST)\n\n return graph", "def get_neighbour_vertices(self, cur: Union[str, int]) -> list:\n\t\tvertices = [edge[0] if edge[1] == cur else edge[1] for edge in self.get_neighbour_edges(cur)]\n\t\treturn vertices", "def edges(self):\r\n return self.__generate_edges()", "def edges(self):\n return self.generate_edges()", "def communityGraph(graph):\n\n lapgr = nx.laplacian_matrix(graph)\n\n # Get the eigenvalues and eigenvectors of the Laplacian matrix\n evals, evec = np.linalg.eigh(lapgr.todense())\n\n fiedler = evec[1]\n results = []\n ## \"Fiedler\", fiedler\n median = np.median(fiedler, axis=1) # median of the second eigenvalue\n for i in range(0, fiedler.size): # divide the graph nodes into two\n if(fiedler[0, i] < median):\n results.append(0)\n else:\n results.append(1)\n return results, evals, evec", "def get_edges(self):\n return [tuple(edge) for edge in self._tree.tree_grid[1:3, :].T]", "def get_top_categories(self, train: NumpyOrPandas, top_n: int = 5) -> List[str]:\n if self.max_intersection_depth <= 1 or self.top_intersections <= 1:\n return []\n\n cats = get_columns_by_role(train, \"Category\")\n if len(cats) == 0:\n return []\n\n df = DataFrame({\"importance\": 0, \"cardinality\": 0}, index=cats)\n # importance if defined\n if self.feats_imp is not None:\n feats_imp = Series(self.feats_imp.get_features_score()).sort_values(ascending=False)\n df[\"importance\"] = feats_imp[feats_imp.index.isin(cats)]\n df[\"importance\"].fillna(-np.inf)\n\n # check for cardinality\n df[\"cardinality\"] = self.get_uniques_cnt(train, cats)\n # sort\n df = df.sort_values(\n by=[\"importance\", \"cardinality\"],\n ascending=[False, self.ascending_by_cardinality],\n )\n # get top n\n top = list(df.index[:top_n])\n\n return top", "def get_edges(self):\n edge_from = []\n edge_to = []\n global mount_points\n global sorted_mount_points\n if sorted_mount_points is None:\n logger.debug(\"Mount points [%s]\", mount_points)\n sorted_mount_points = sort_mount_points(mount_points.keys())\n logger.info(\"Sorted mount points [%s]\", 
sorted_mount_points)\n\n # Look for the occurance in the list\n mpi = sorted_mount_points.index(self.mount_point)\n if mpi > 0:\n # If not the first: add also the dependency\n dep = mount_points[sorted_mount_points[mpi - 1]]\n edge_from.append(dep.name)\n\n edge_from.append(self.base)\n return (edge_from, edge_to)", "def colored_edges(genome):\n edges = []\n for chromo in genome:\n nodes = [0] + chromosome_to_cycle(chromo)\n nodes.append(nodes[1])\n for j in range(1, len(chromo) + 1):\n edges.append((nodes[2 * j], nodes[2 * j + 1]))\n\n return edges", "def exact_min_vertex_cover(graph):\n for N in range(1,len(graph.nodes())+1):\n for graph_sub in it.combinations(sorted(graph.nodes(), reverse=True), N):\n graph_temp = graph.copy()\n graph_temp.remove_nodes_from(graph_sub)\n if len(graph_temp.edges()) == 0:\n return list(graph_sub)", "def find_max_triples_from_upper_triangle_product(upper_product, top_n=5, prob_thd=None):\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple", "def get_head_and_tail(street):\r\n def is_end(location):\r\n num_adj = 0\r\n adj = get_orthogonal(location)\r\n for merchant in street:\r\n if merchant != location and merchant in adj:\r\n num_adj += 1\r\n return num_adj <= 1\r\n return [loc for loc in street if is_end(loc)]", "def walk_sort(edges):\n g = nx.Graph()\n g.add_edges_from(edges)\n connected = set()\n degree = nx.degree(g)\n ordering = []\n while degree:\n next = max_degree_node(g, degree, connected)\n if next is not None:\n ordering.append(next)\n else:\n break\n return ordering", "def top_level(self):\n top = set(self)\n for cell in self:\n top.difference_update(cell.get_dependencies())\n return list(top)", "def edges(self):\n table = []\n edge_index = self.lofted_car.edges\n edge_radius = np.array([0.04166667, 0., 0.01041667, 0., 0.04166667,\n 0.02083333, 0.01041667, 0.02083333, 0.02083333,\n 0.02083333,\n 0.04166667, 0., 0.01041667, 0.04166667, 0.,\n 0.01041667, 0., 0.]) * self.length_car\n for i in range(len(edge_radius)):\n if edge_radius[i] != 0.:\n table_instance = (edge_index[i], edge_radius[i])\n table.append(table_instance)\n return table", "def puzzle_leaders(self, n=10, courses=None):\r\n from foldit.models import Score\r\n\r\n if courses is None:\r\n courses = [self.location.course_key]\r\n\r\n leaders = [(leader['username'], leader['score']) for leader in Score.get_tops_n(10, course_list=courses)]\r\n leaders.sort(key=lambda x: -x[1])\r\n\r\n return leaders", "def FindTopToBottomEdges(self, bwImage):\n\n rowList = [\"y=1/98x-69\", \"y=-1/97+70\"] # Temp List\n return rowList", "def top_tiles(self):\n sorted_tiles = self.tiles_by_score()\n top_tiles = sorted_tiles[:NUM_TOP_TILES]\n return top_tiles", "def _get_odd_degree_vertices(graph):\n odd_degree_vertices = set()\n for index, row in enumerate(graph):\n if len(np.nonzero(row)[0]) % 2 != 0:\n odd_degree_vertices.add(index)\n return odd_degree_vertices", "def _get_odd_degree_vertices(graph):\n odd_degree_vertices = set()\n for index, row in enumerate(graph):\n if len(np.nonzero(row)[0]) % 2 != 0:\n odd_degree_vertices.add(index)\n return odd_degree_vertices", "def get_top(self):\n elements = self.S.get_maximal_elements()\n data = {}\n alot = Nat().get_top()\n for e in elements:\n data[e] = alot\n return Multiset(data, self.S)", "def get_edges(self):\n for i in 
self.gens:\n if self.active[i]:\n elist = set()\n H = (i,) # edge-stabilizing subgroup\n reps = set(self.word_generator(parabolic=H))\n reps = self.G.sort_words(reps)\n for word in reps:\n v1 = self.G.move(self.vtable, 0, word)\n v2 = self.G.move(self.vtable, 0, word + (i,))\n if v1 is not None and v2 is not None:\n if v1 > v2:\n v1, v2 = v2, v1\n if (v1, v2) not in elist:\n elist.add((v1, v2))\n\n self.edge_indices[i] = elist\n\n self.num_edges = sum(len(L) for L in self.edge_indices.values())", "def pairs_of_vertices(self):\n pairs_of_vertices = []\n for vertice in self.list_of_vertices:\n for edge in vertice.edges_list:\n if non_oriented:\n if (vertice, edge.linked[1]) and (edge.linked[1], vertice) not in pairs_of_vertices:\n pairs_of_vertices.append((vertice, edge.linked[1]))\n if not non_oriented:\n if (vertice, edge.linked[1]) not in pairs_of_vertices:\n pairs_of_vertices.append((vertice, edge.linked[1]))\n return pairs_of_vertices", "def crossHalfLine(self, other):\n points = []\n for segment in self.segments:\n cross = other.crossSegment(segment)\n if cross:\n points.append(cross)\n hp = other.point\n objects = [(p, Point.distance(p, hp)) for p in points]\n objects = sorted(objects, key=lambda x: x[1])\n return [p for (p, v) in objects]", "def GetTopConfidenceNodes(g, dia, topn=20):\r\n conf_hf = {}\r\n for i in g.nodes(data=True):\r\n n, distr = i\r\n if (distr['class_distribution']['No Diagnosis'] != 0):\r\n conf_hf[n] = distr['class_distribution']['Diagnosis'] / (\r\n distr['class_distribution']['Diagnosis'] + distr['class_distribution']['No Diagnosis'])\r\n nx.set_node_attributes(g, conf_hf, 'confidence')\r\n nodeconf = {i[0]: i[1]['confidence'] for i in g.nodes(data=True) if\r\n (i[1]['class_distribution']['No Diagnosis'] != 0)}\r\n nodeconf_sorted = sorted(nodeconf.items(), key=operator.itemgetter(1), reverse=True)\r\n PrintCodeDescr(g, dia, nodeconf_sorted[:topn], mode=\"node\")", "def extract_edges(graph):\n return graph.get_edges()", "def generate_pairs_lists(\n top, molecule=None, sort_key=None, refer_from_scaling_factor=False\n):\n from gmso.external import to_networkx\n from gmso.parameterization.molecule_utils import (\n molecule_angles,\n molecule_bonds,\n molecule_dihedrals,\n )\n\n nb_scalings, coulombic_scalings = top.scaling_factors\n\n if sort_key is None:\n sort_key = top.get_index\n\n graph = to_networkx(top, parse_angles=False, parse_dihedrals=False)\n\n pairs_dict = dict()\n if refer_from_scaling_factor:\n for i in range(3):\n if nb_scalings[i] or coulombic_scalings[i]:\n pairs_dict[f\"pairs1{i+2}\"] = list()\n else:\n for i in range(3):\n pairs_dict = {f\"pairs1{i+2}\": list() for i in range(3)}\n\n if molecule is None:\n bonds, angles, dihedrals = top.bonds, top.angles, top.dihedrals\n else:\n bonds = molecule_bonds(top, molecule)\n angles = molecule_angles(top, molecule)\n dihedrals = molecule_dihedrals(top, molecule)\n\n if \"pairs12\" in pairs_dict:\n for bond in bonds:\n pairs = sorted(bond.connection_members, key=sort_key)\n pairs_dict[\"pairs12\"].append(pairs)\n\n if \"pairs13\" in pairs_dict:\n for angle in angles:\n pairs = sorted(\n (angle.connection_members[0], angle.connection_members[-1]),\n key=sort_key,\n )\n if (\n pairs not in pairs_dict[\"pairs13\"]\n and shortest_path_length(graph, pairs[0], pairs[1]) == 2\n ):\n pairs_dict[\"pairs13\"].append(pairs)\n\n if \"pairs14\" in pairs_dict:\n for dihedral in dihedrals:\n pairs = sorted(\n (\n dihedral.connection_members[0],\n dihedral.connection_members[-1],\n ),\n key=sort_key,\n )\n if (\n 
pairs not in pairs_dict[\"pairs14\"]\n and shortest_path_length(graph, pairs[0], pairs[1]) == 3\n ):\n pairs_dict[\"pairs14\"].append(pairs)\n\n for key in pairs_dict:\n pairs_dict[key] = sorted(\n pairs_dict[key],\n key=lambda pairs: (sort_key(pairs[0]), sort_key(pairs[1])),\n )\n\n return pairs_dict", "def get_eulerian_graph_edges(bbox, source):\n osm_graph = OSMGraph(bbox, source)\n # input all nodes and get odd nodes, update node attributes\n odd_nodes = get_odd_nodes(osm_graph.nodes_dict)\n\n # initialize all_pairs_list\n all_pairs_list = []\n\n # if there are 6 or fewer odd nodes look for all possible options,\n # otherwise look for just three basic pairing options\n\n if len(odd_nodes) <= 10:\n print(\"ROBUST PAIRING FUNCTION\")\n all_pairs_list = get_list_of_all_pairs_lists(odd_nodes)\n\n else:\n print(\"CHEAP PAIRING FUNCTION\")\n all_pairs_list = get_list_of_all_pairs_lists_short(odd_nodes)\n\n for item in all_pairs_list:\n print(\"\\n\\nPair option:\", item)\n print(\"Pair option len:\", len(item))\n\n dict_pairings_lists_lengths = get_dict_pairings_lists_lengths(\n all_pairs_list, osm_graph\n )\n twice_traversals_edges = get_twice_traversals_edges(dict_pairings_lists_lengths)\n updated_graph_instance = update_twice_traversal_edges(\n twice_traversals_edges, osm_graph\n )\n return updated_graph_instance", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def get_edges(self, topogramId):\n return self.make_request(\"GET\", \"topograms/\"+topogramId+\"/edges\", {})", "def course_lineup(self):\n return self._get_child_page_of_type(CoursesInProgramPage)", "def course_lineup(self):\n return self._get_child_page_of_type(CoursesInProgramPage)", "def eligible_edges(self):\n return self.edges", "def vertices(self):\n return map(Vertex, self._top_exp.vertices())", "def getOutEdges(self):\n edges = []\n for edict in mm.G[self].values():\n for k in edict.keys():\n edges.append(edict.get(k).get(\"edge\"))\n \n return edges", "def train_edges(self):\n return self._train_edges", "def crosses(geometry, sr=None):\r\n return _filter(geometry, sr, 'esriSpatialRelCrosses')", "def get_current_edges(self) -> Tuple[int, int, int, int]:\n top = int(self.tile_rows[0], 2)\n bottom = int(self.tile_rows[-1], 2)\n left = int(''.join([r[0] for r in self.tile_rows]), 2)\n right = int(''.join([r[-1] for r in self.tile_rows]), 2)\n\n return (top, bottom, left, right)", "def top30_clients(self):\n clients = self.clients_sorted_by_rentals()\n return clients[:int(0.3 * len(clients))]", "def left_incidence(self):\n linc = []\n for i in range(self.V):\n linc.append([])\n for e in range(self.E):\n i = self.edges[e, 0]\n a = linc[i]\n a.append(e)\n return linc", "def greedy_max_cut(graph):\n cut = Cut(set(), set())\n for vertex in graph.nodes:\n l_neighbors = sum((adj in cut.left) for adj in graph.neighbors(vertex))\n r_neighbors = sum((adj in cut.right) for adj in graph.neighbors(vertex))\n if l_neighbors < r_neighbors:\n cut.left.add(vertex)\n else:\n 
cut.right.add(vertex)\n return cut", "def topCountries(top=10):\r\n #top 10 deadly countries\r\n countries = agg('country')[:top].index\r\n #grab aggregated data for these countries\r\n dataOfTop10 = agg(['year','country']).query(\"country in @countries\")### interesting...\r\n #unstack data\r\n dataOfTop10 = dataOfTop10.unstack(1)\r\n #remove multiindexes\r\n dataOfTop10 = dataOfTop10.transpose().reset_index(level=0, drop=True).transpose()\r\n #sort by year\r\n dataOfTop10.sort_index(inplace=True)\r\n return dataOfTop10", "def _find_largest_Rectangles_in_cross_hatch(x, y):\n if x < y: # Swap to iterate over the longest side.\n x, y = y, x\n\n rectangles = []\n for i in range(1, x): # Iterate over lower-edge vertices, ignoring corners\n a0, a1 = i, -i # Slope-intercepts for cross-hatch lines running through point (0, i)\n for j in range(1, x): # Iterate over upper-edge vertices, still ignoring corners\n b0, b1 = y - j, y + j # Slope-intercepts for cross-hatch lines running through point (y, j)\n x0, x1 = (a0 - b0) / 2, (b1 - a1) / 2\n if x >= x0 >= 0 and x >= x1 >= 0 and y > -x0 + a0 > 0 and y > x1 + a1 > 0: # All four corners are w/i grid\n rectangles.append(Rectangle((i, 0), (j, y), aligned_with_grid=False))\n # assert len(rectangles) == (2*y - 1) * (x - y) + (y - 1)\n return rectangles", "def edges(self):\n return [(a, b) for a in self._consequences_of\n for b in self._consequences_of[a]]", "def course_pages(self):\n courses = self.program.courses.all()\n return (\n CoursePage.objects.filter(course_id__in=courses)\n .select_related(\"course\", \"thumbnail_image\")\n .order_by(\"course__position_in_program\")\n )", "def get_onscreen_edges(self, graph_edges, graph_corners):\n\n # create equations representing the screen's boarders\n top_left = Point(graph_corners[\"top_left\"].get_x(), graph_corners[\"top_left\"].get_y())\n top_right = Point(graph_corners[\"top_right\"].get_x() + 0.001, graph_corners[\"top_right\"].get_y())\n bottom_left = Point(graph_corners[\"bottom_left\"].get_x() + 0.001, graph_corners[\"bottom_left\"].get_y())\n bottom_right = Point(graph_corners[\"bottom_right\"].get_x(), graph_corners[\"bottom_right\"].get_y())\n top = LineEquation.create_equation(top_left, top_right)\n bottom = LineEquation.create_equation(bottom_left, bottom_right)\n left = LineEquation.create_equation(bottom_left, top_left)\n right = LineEquation.create_equation(bottom_right, top_right)\n\n displayed_edges = []\n\n for edge in graph_edges:\n real_node1 = self.original_graph.get_node_by_serial(edge.node1.serial)\n real_node2 = self.original_graph.get_node_by_serial(edge.node2.serial)\n point1 = Point(real_node1.x, real_node1.y)\n point2 = Point(real_node2.x, real_node2.y)\n edge_equation = LineEquation.create_equation(point1, point2)\n edge.set_slope(edge_equation)\n if self.is_node_onscreen(edge.node1, graph_corners):\n if self.is_node_onscreen(edge.node2, graph_corners):\n # both of edge's node are onscreen\n if edge.node1.get_x() < edge.node2.get_x():\n curr_edge = (real_node1, real_node2, edge.slope, edge_equation)\n else:\n curr_edge = (real_node2, real_node1, edge.slope, edge_equation)\n else:\n # only the edge's first node in onscreen\n curr_edge = self.get_partly_visible_edge(edge, top, bottom, left, right, edge.node1, edge_equation)\n elif self.is_node_onscreen(edge.node2, graph_corners):\n # only the edge's second node is onscreen\n curr_edge = self.get_partly_visible_edge(edge, top, bottom, left, right, edge.node2, edge_equation)\n else:\n # neither of the edge's nodes are onscreen\n 
curr_edge = self.get_partly_visible_edge(edge, top, bottom, left, right, None, edge_equation)\n\n if curr_edge is not None:\n displayed_edges.append(curr_edge)\n return displayed_edges", "def xedges(self):\n return self.edges[0]", "def edges(self):\r\n return self.capacity.keys()", "def sample_top_neighbors( self, max_count=200 ):\n df = self.copy()\n return df[df['neighbor'] <= max_count].coerce()", "def get_neighbours(self):\n return []", "def cyclic_subgroups(C):\n ps = C.points()\n Gs = []\n \n for i in ps:\n P = Elliptic_Point(i[0],i[1],C)\n Gs.append(cyclic_subgroup(P))\n\n return Gs" ]
[ "0.59826374", "0.55603427", "0.5523582", "0.5495732", "0.5461872", "0.54479295", "0.53818995", "0.5318768", "0.52677137", "0.5246417", "0.520737", "0.5162896", "0.5158995", "0.51440036", "0.51215637", "0.5089466", "0.50795156", "0.50770104", "0.5055389", "0.5054957", "0.50231767", "0.5018502", "0.50170475", "0.50167465", "0.5011203", "0.49917084", "0.49874586", "0.4986527", "0.49810535", "0.4973235", "0.496268", "0.49402386", "0.49360588", "0.49319586", "0.49309492", "0.49151602", "0.49112728", "0.49047297", "0.4902264", "0.48931804", "0.48921493", "0.48907667", "0.4878913", "0.4867042", "0.4863624", "0.4858276", "0.48527858", "0.48464108", "0.48416212", "0.4838148", "0.48285425", "0.48282403", "0.48175022", "0.4811523", "0.4807641", "0.47960082", "0.47897768", "0.47853196", "0.47835624", "0.47816816", "0.4781477", "0.47726724", "0.4769409", "0.47669834", "0.4760161", "0.47597358", "0.47580844", "0.47566685", "0.47566685", "0.47540784", "0.47501132", "0.47483397", "0.4748324", "0.4747337", "0.47449702", "0.47433087", "0.47399402", "0.47398892", "0.47131735", "0.47091636", "0.47091636", "0.4708997", "0.47041902", "0.46998373", "0.46881032", "0.46868113", "0.46835518", "0.4683299", "0.46816126", "0.4673509", "0.46722263", "0.46717304", "0.46713567", "0.4670688", "0.466885", "0.4666261", "0.46588543", "0.46583372", "0.46569124", "0.46560815" ]
0.72403175
0
Return filename of a submission downloaded from synapse.
def downloadSubmissionAndFilename(self, sub, downloadFile=True, **kargs):
    if isinstance(sub, dict) == False:
        raise TypeError("input must be a submission (dictionary)")
    if downloadFile == False:
        filename = self.getSubmission(sub, downloadFile=False)['filePath']
    else:
        filename = self.getSubmission(sub, downloadFile=True, **kargs)['filePath']
    return filename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_filename(self) -> str:\n return self._download_filename", "def get_download_file_name(self):\n # Use 'unknown' if the course instance does not have a term\n if self.course_instance.term:\n term = self.course_instance.term.get_url_name()\n else:\n term = 'unknown'\n\n return '{course}-{term}-{number}-{instructors}-{type}{ext}'.format(\n course=self.course_instance.course.get_url_name(),\n term=term,\n number=self.exam_number,\n instructors='_'.join([i.last_name for i in self.instructors]),\n type=self.exam_type,\n ext=self.file_ext)", "def get_filename(self) -> str:\n return self._filename", "def get_filename(self):\n return self.filename", "def _getfilename(self):\n pass", "def get_filename(self):\n return self.ds_filename", "def filename(self):\n return TaskInfo._filename(self.id)", "def get_filename(self):\n return self._filename", "def get_filename(self):\n return self.__filename", "def get_filename(self):\n\n return self._filename", "def download_submission_attachment(self, url):\n\n r = requests.get(url)\n return r.content", "def filename(self):\n # TODO(aron): write tests for this\n\n return os.path.basename(self.file_on_disk.name)", "def _get_file_name(self) -> str:\n headers = self._get_url_http_headers()\n file_type = self._check_url_file_type(headers)\n file_name = self._get_filename_from_headers(headers)\n\n if not file_name:\n file_name = self._get_filename_from_url()\n\n if file_name is None:\n raise FileNameCannotBeEvaluatedException\n\n if file_type:\n file_name = self._add_file_extension(file_name, file_type)\n\n return file_name", "def get_file_name(self):\n return str(self.get_file())", "def filename(self):\n if self.__filename__ is None:\n return _filename\n else:\n return self.__filename__", "def GetFileName():\r\n d = GetData()\r\n return d.filename", "def GetFilename(self):\n return self._filename", "def get_download_filename(resp, default=None):\n filename = None\n\n content_disposition = resp.headers.get(\"content-disposition\")\n if content_disposition:\n filenames = re.findall(\"filename=(.+)\", content_disposition)\n if filenames:\n filename = filenames[0]\n\n if not filename:\n filename = urlsplit(resp.url).path.rsplit(\"/\", 1)[1]\n\n return filename or default", "def filename(self) -> str:\n return self.__location.filename", "def getFileName(self):\n return self.filename", "def get_filename(self, latex_doc=None):\n if not self.filename:\n if latex_doc is None:\n filename = 'file'\n else:\n filename = latex_doc.filename\n else:\n filename = self.filename\n return self.fix_filename_extension(filename)", "def get_filename( self, default=None, decode=None ):\n return self.get_param( 'filename', None, 'content-disposition', decode=decode ) \\\n or self.get_param( 'name', default, 'content-type', decode=decode )", "def get_filename(self) -> str:\n fname = self.url.split(\"/\")[-1]\n if \",\" in fname:\n _fname, _i = fname.split(\",\")\n _split_fname = _fname.split(\".\")\n _name = _split_fname[0]\n _extension = _split_fname[-1]\n return _name + _i + \".\" + _extension\n else:\n return fname", "def filename(self):\n return self._filename", "def filename(self):\n return self._filename", "def filename(self):\n return self._filename", "def filename(self):\n return self._filename", "def filename(self):\n return self.__filename", "def get_track_filename(self, url = None):\n track_file = urllib.urlopen(url)\n headers = track_file.info()\n track_file.close()\n return wget.filename_from_headers(headers)", "def filename(self):\n return '%s%s' % 
(self.identifier, self.extension)", "def get_filename(self):\n name, ext = self.fkit.filename.rsplit('.', 1)\n if self._field.extension():\n ext = self._field.extension()\n return '.'.join((name, ext))", "def filename(self):\n return self.system.fileName()", "def content_file_name(instance, filename):\r\n return '/'.join([str(instance.app.publisher.id), str(instance.app.id), filename])", "def filename(self) -> str:\n return self.definition.slug", "def filename(self):\r\n return self._filename", "def get_submission():\n result_files = []\n for filename in os.listdir(\".\"):\n if filename.endswith(\"_output.csv\"):\n result_files.append(filename)\n return result_files[0]", "def filename(self):\n # type: () -> str\n return self._filename", "def get_dest_name ( self ):\n return self.filename", "def _get_output_filename(dataset_dir, split_name):\n return '%s/fer_%s.tfrecord' % (dataset_dir, split_name)", "def get_filename(self) -> str:\n\t\treturn self.xml_name", "def filename(self):\r\n\t\treturn None", "def get_file_name(self):\n return self.file_name", "def name(self):\n return self._filename", "def get_file_name(self):\n return self._fileName", "def download_filename_full(self, doc):\n # todo modify\n authors = \",\".join([x['name'] for x in doc.artists])\n author = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', authors.strip())\n mp3_name = re.sub(\"[\\\\\\\\/:*?\\\"<>|]\", '', doc['name'])\n name = os.path.join(author, \"%s - %s.mp4\" % (author, mp3_name))\n return name", "def get_filename(extended_slug):\n user, project, build_id, job_id = split_extended_slug(extended_slug)\n\n if None in (user, project, build_id, job_id): # todo; remove this\n return\n\n filename_glob = os.path.join(\n test_data_dir,\n user, project,\n '{0}.{1}-*.txt'.format(build_id, job_id))\n filenames = glob.glob(filename_glob)\n if filenames:\n return filenames[0]\n else:\n return None", "def get_filename(self):\n \n for f in os.listdir(self.get_directory()):\n if os.path.isfile(os.path.join(self.get_directory(), f)):\n return f\n \n return None", "def filename(self):\n filename, ext = os.path.splitext(self.file.name)\n return filename.split('/')[-1]", "def get_file_name(self):\n return self.upload.name[6:]", "def get_file_name(self):\n return self.__file_name", "def getFileName(self):\n return self.__fileName", "def get_attachment_filename(_id, unique_id, backup_dir, thread_dir):\n fname = f\"Attachment_{_id}_{unique_id}.bin\"\n source = os.path.abspath(os.path.join(backup_dir, fname))\n if not os.path.exists(source):\n logger.warn(\n f\"Couldn't find attachment '{source}'. 
Maybe it was deleted or never downloaded.\"\n )\n return None\n\n # Copying here is a bit of a side-effect\n target_dir = os.path.abspath(os.path.join(thread_dir, \"attachments\"))\n os.makedirs(target_dir, exist_ok=True)\n target = os.path.join(target_dir, fname)\n shutil.copy(source, target)\n url = \"/\".join([\".\", \"attachments\", fname])\n return url", "def name_woext(self):\n return os.path.splitext(self._job)[0]", "def get_filename(self, headers):\n header = headers.get('content-disposition')\n\n if header is not None:\n _, params = cgi.parse_header(header)\n filename = params.get('filename')\n else:\n try:\n filename = self.parsed.path.split('/')[-1]\n except IndexError:\n filename = None\n\n return filename if filename is not None else self.DEFAULT_FILENAME", "def filename(self):\n return self.config.get('filename', self.id) + f'_{self.file_suffix}'", "def filename(self):\n if self.is_persistent is True:\n return self.backend.filename\n\n return None", "def _get_output_filename(dataset_dir):\n return os.path.join(dataset_dir, 'pokemon.tfrecord')", "def give_filename( url_rel ):\n filename = basename( url_rel )\n\t# Add time information\n now_datetime = datetime.datetime.now( )\n now_string = now_datetime.strftime( \"%Y-%m-%d-%H-%M-%S\" )\n if filename.endswith( '.pdf' ):\n\t\tfileno, ext_pdf = splitext( filename )\n\t\tpdf_filename = fileno + '-' + now_string + ext_pdf\n\t\treturn pdf_filename", "def get_filename(cls):\n return osp.join(cls.dir_location, *cls.file_path)", "def get_name(self):\n return self.file_name", "def _get_filename():\n dirname = os.path.dirname(__file__)\n return os.path.join(dirname, 'occulttraining.txt')", "def _get_report_filename(self):\n report_date = self._get_report_date()\n report_id = self._get_report_id()\n description = ReportMeta.reports[self._report_key]['reportDescription']\n return '{}_{}_{}.pdf'.format(report_id, report_date, description).replace(' ', '_')", "def GetFileName(self):\n return self.file.GetPath()", "def get_filename(url: str) ->str:\n if 'drive.google.com' in url:\n return _extract_google_drive_file_id(url)\n url, filename = os.path.split(url)\n return filename or os.path.basename(url)", "def filename(self):\n return self.tag(\"filename\")", "def _get_output_filename(dataset_dir, split_name):\n return '%s/%s*.tfrecord' % (dataset_dir, split_name)", "def get_filename(label):\n return op.splitext(op.splitext(op.basename(label))[0])[0]", "def filename(self) -> Optional[str]:\n ...", "def _get_file_name(id):\n client = Client(DRS_URL)\n c = client.client\n\n # assume id will be NA18537\n response = c.GetDataObject(data_object_id=id).result()\n return response['data_object'][\"name\"]", "def file_name(self):\n _, blob_name = self._get_container_and_blob()\n\n return blob_name", "def best_filename(link, response):\n content_type = response.info().get('content-type', '')\n filename = link.filename # fallback\n # Have a look at the Content-Disposition header for a better guess:\n content_disposition = response.info().get('content-disposition')\n if content_disposition:\n type, params = cgi.parse_header(content_disposition)\n # We use ``or`` here because we don't want to use an \"empty\" value\n # from the filename param:\n filename = params.get('filename') or filename\n ext = splitext(filename)[1]\n if not ext:\n ext = mimetypes.guess_extension(content_type)\n if ext:\n filename += ext\n if not ext and link.url != response.geturl():\n ext = splitext(response.geturl())[1]\n if ext:\n filename += ext\n return filename", "def 
getFilename(request, pagename, filename):\n if isinstance(filename, unicode):\n filename = filename.encode(config.charset)\n return os.path.join(getAttachDir(request, pagename, create=1), filename)", "def get_attachment_filename(self, filename):\n if self.entry.customer_id:\n path = 'attachs/{kind}/{lang}/{cust}_{filename}'.format(\n kind=self.entry.kind.name,\n lang=self.entry.kind.language,\n cust=self.entry.customer_id,\n filename=filename\n )\n else:\n path = 'attachs/{kind}/{lang}/{filename}'.format(\n kind=self.entry.kind.name,\n lang=self.entry.kind.language,\n filename=filename\n )\n return path", "def whisper_filename(self):\r\n source_name = self.source_id and self.source.name or ''\r\n return get_valid_filename(\"{0}__{1}.wsp\".format(source_name,\r\n self.name))", "def getNoteFileName(self, show, sequence, id):\n idPadded = self.__getPaddedId(id)\n fileName = Mode(show, sequence).get(\"[noteBaseName]\", {\"id\":idPadded})\n\n# log(\"getNoteFileName id: %s fileName: %s\" % (id, fileName))\n\n return fileName", "def file_name(self):\n return self._file_name", "def file_name(self):\n return self._file_name", "def file_name(self):\n return self._file_name", "def file_name(self):\n return self._file_name", "def file_name(self):\n return self._file_name", "def filename_for_resource(resource):\n name = resource['url'].encode('utf8', 'ignore').split('/')[-1]\n return unquote(name)", "def file_name(self):\n return self.lib.file_name", "def file_name(self):\n return self.__file_name", "def fn(self):\n if not self.meta.get(\"FileName\"):\n self.meta[\"FileName\"] = self.tags[\"AccessionNumber\"]\n return self.meta.get('FileName')", "def get_file_name(self):\n\n return \"%s - %s\" % (self.get_tags()[\"artist\"], self.get_tags()[\"title\"])", "def get_file_name(url: str):\n filename = os.path.basename(url)\n fname, extension = os.path.splitext(filename)\n if extension:\n if \"=\" in filename:\n return filename.split(\"=\")[-1]\n return filename\n header = requests.head(url).headers\n if \"Location\" in header:\n return os.path.basename(header[\"Location\"])\n return filename", "def filename(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_filename(self, *args)", "def get_url_filename(url, headers=None, strip=[]):\n filename = get_url_disposition_filename(url, headers)\n if filename:\n return filename\n return get_url_straight_filename(url, strip=[])", "def _retrieve_filename(file: Union[str, FileStorage]) -> str:\n if isinstance(file, FileStorage):\n return file.filename\n return file", "def get_upload_to(instance, filename):\n\n if instance.rating_decision:\n return 'issuers/%s/rating_decision/%s/%s'\\\n % (\n instance.issuer.lei,\n instance.rating_decision.id,\n filename)\n else:\n return 'issuers/%s/%s' % (instance.issuer.lei, filename)", "def get_filename(out_dir, file_date, extension):\n return path.join(out_dir, f'CrossrefCitations_{file_date}.{extension}')", "def get_file_name(self):\n return self.path.name[6:]", "def get_filename(filename):\n \n return utilities.get_filename(filename)", "def get_training_path(instance, filename):\n return f'training/{instance.slug}/training-report.pdf'", "def GetFilename(title, filename = \"\"):\r\n return _hiew.HiewGate_GetFilename(title, filename)", "def _filename_from_url(url):\n file_name = url.split(\"/\")[-1]\n return file_name", "def _downloaded_filename(self):\n # Peep doesn't support requirements that don't come down as a single\n # file, because it can't hash them. 
Thus, it doesn't support editable\n # requirements, because pip itself doesn't support editable\n # requirements except for \"local projects or a VCS url\". Nor does it\n # support VCS requirements yet, because we haven't yet come up with a\n # portable, deterministic way to hash them. In summary, all we support\n # is == requirements and tarballs/zips/etc.\n\n # TODO: Stop on reqs that are editable or aren't ==.\n\n finder = package_finder(self._argv)\n\n # If the requirement isn't already specified as a URL, get a URL\n # from an index:\n link = (finder.find_requirement(self._req, upgrade=False)\n if self._req.url is None\n else Link(self._req.url))\n\n if link:\n lower_scheme = link.scheme.lower() # pip lower()s it for some reason.\n if lower_scheme == 'http' or lower_scheme == 'https':\n file_path = self._download(link)\n return basename(file_path)\n elif lower_scheme == 'file':\n # The following is inspired by pip's unpack_file_url():\n link_path = url_to_path(link.url_without_fragment)\n if isdir(link_path):\n raise UnsupportedRequirementError(\n \"%s: %s is a directory. So that it can compute \"\n \"a hash, peep supports only filesystem paths which \"\n \"point to files\" %\n (self._req, link.url_without_fragment))\n else:\n copy(link_path, self._temp_path)\n return basename(link_path)\n else:\n raise UnsupportedRequirementError(\n \"%s: The download link, %s, would not result in a file \"\n \"that can be hashed. Peep supports only == requirements, \"\n \"file:// URLs pointing to files (not folders), and \"\n \"http:// and https:// URLs pointing to tarballs, zips, \"\n \"etc.\" % (self._req, link.url))\n else:\n raise UnsupportedRequirementError(\n \"%s: couldn't determine where to download this requirement from.\"\n % (self._req,))", "def get_name(self):\n return self._file.name", "def GetOutputFilename(self, directory=None):\n if self.forced_filename:\n logging.debug('Forced filename or pre-computed file name = %s', self.filename)\n return self.filename\n\n tags = dict()\n\n # Base tag\n tags['base'] = f\"{self['ARTIST']} - {self['DATE_RECORDED']} - {self['TITLE']}\"\n\n # Setup version subinfo\n tags['version'] = f\" ({self['VERSION']})\" if self[\"VERSION\"] else \"\"\n\n # Setup label / release subinfo\n channels = self.channels if self.channels != '2.0' else ''\n if self[\"ORIGINAL_MEDIUM\"] == \"CD\":\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {channels}\"\n else:\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {self['ORIGINAL_MEDIUM']} {channels}\"\n labeltag = labeltag.strip()\n tags['label'] = labeltag and f\" ({labeltag})\"\n\n # Setup disc tag\n if self[\"PART_NUMBER\"]:\n disctag = f\" (Disc {self['PART_NUMBER']}) {self['DISC_NAME']}\"\n else:\n disctag = f\" {self['DISC_NAME']}\"\n tags['disc'] = disctag.rstrip()\n\n # Merge into filename\n filename = f\"{tags['base']}{tags['version']}{tags['disc']}{tags['label']}{ext.WAV}\"\n # Replace invalid characters with either a dash or remove them\n filename = re.compile(\"[<>:/\\\\\\\\]\").sub(\"-\", filename)\n filename = re.compile(\"[|?*]\").sub(\"\", filename)\n # Replace invalid double quotes with valid single quotes\n filename = filename.replace('\"', \"'\")\n\n if directory:\n return os.path.join(directory, filename)\n return filename", "def get_media_filename(media_url):\n return media_url.split(\"/\")[-1]" ]
[ "0.7089564", "0.70576966", "0.69732934", "0.69194645", "0.68737155", "0.68652344", "0.6865232", "0.68460745", "0.6798308", "0.6795707", "0.66493416", "0.66368407", "0.6624542", "0.6617163", "0.66041523", "0.6603908", "0.65955335", "0.65814525", "0.6547018", "0.6510974", "0.6482064", "0.6473516", "0.647251", "0.6468039", "0.6468039", "0.6468039", "0.6468039", "0.6462826", "0.6450731", "0.6447823", "0.6430711", "0.6407951", "0.6404397", "0.63974375", "0.63906324", "0.63854796", "0.63784015", "0.63766545", "0.63668567", "0.63517", "0.632712", "0.6318501", "0.6309806", "0.6307991", "0.63061357", "0.63056016", "0.62843955", "0.62812984", "0.6253808", "0.62465966", "0.6241576", "0.62363356", "0.6235626", "0.6204812", "0.6199703", "0.61966246", "0.6194257", "0.61840904", "0.6181392", "0.61725295", "0.6157008", "0.6149722", "0.6146856", "0.6126085", "0.61226565", "0.6119286", "0.61183065", "0.61169416", "0.610035", "0.609831", "0.6088198", "0.60880065", "0.6086884", "0.6083688", "0.6079774", "0.6078954", "0.6078954", "0.6078954", "0.6078954", "0.6078954", "0.6071668", "0.6067028", "0.6057361", "0.60380965", "0.60357404", "0.60334575", "0.603305", "0.6031823", "0.6008243", "0.6007793", "0.60018015", "0.60007083", "0.59900105", "0.59882116", "0.5975836", "0.59647655", "0.5962224", "0.596114", "0.59582883", "0.5954381" ]
0.6980007
2
Transform relevant object into json object
def json(self, data):
    import json
    data = json.dumps(data)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def object_to_json(self, json_data):\n result = {}\n if sys.version_info[0] < 3:\n itr = json_data.__dict__.iteritems()\n else:\n itr = json_data.__dict__.items()\n for key,value in itr:\n # Skip internal attributes.\n if key.startswith(\"__\"):\n continue\n result[key] = self.array_to_json_array(value) if isinstance(value, list) else\\\n self.object_to_json(value) if not self.is_primittive(value) else\\\n value\n return result;", "def object_to_json(self, json_data):\n result = {}\n if sys.version_info[0] < 3:\n itr = json_data.__dict__.iteritems()\n else:\n itr = json_data.__dict__.items()\n for key,value in itr:\n # Skip internal attributes.\n if key.startswith(\"__\"):\n continue\n result[key] = self.array_to_json_array(value) if isinstance(value, list) else\\\n self.object_to_json(value) if not self.is_primittive(value) else\\\n value\n return result", "def object_to_json(self, json_data):\n result = {}\n if sys.version_info[0] < 3:\n itr = iter(json_data.__dict__.items())\n else:\n itr = list(json_data.__dict__.items())\n for key,value in itr:\n # Skip internal attributes.\n if key.startswith(\"__\"):\n continue\n result[key] = self.array_to_json_array(value) if isinstance(value, list) else\\\n self.object_to_json(value) if not self.is_primittive(value) else\\\n value\n return result", "def json_friendly(self):", "def as_json(self):", "def json(self):\n class ExtendedJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime.date) or isinstance(obj, datetime.time):\n encoded_object = obj.isoformat()\n else:\n encoded_object = json.JSONEncoder.default(self, obj)\n return encoded_object\n\n obj = {\n 'operation': self.operation,\n 'version': self.version,\n 'language': self.language,\n 'identifiers': self.identifiers,\n 'store_execute': self.store_execute,\n 'status': self.status,\n 'lineage': self.lineage,\n 'inputs': dict((i, [inpt.json for inpt in self.inputs[i]]) for i in self.inputs),\n 'outputs': self.outputs,\n 'raw': self.raw\n }\n\n return json.dumps(obj, allow_nan=False, cls=ExtendedJSONEncoder)", "def to_json(self):\n pass", "def serialize(self, obj):\n return json.dumps(obj)", "def convert_to_json(self):\n return self.__dict__", "def pack(self, obj):\n # TODO: use a JSON encoder that handles more types?\n if obj is not None:\n return json.dumps(obj)", "def toJSON(cls, obj):\n return json.dumps(obj)", "def jsonizable_object(self):\n obj = {\n 'title': self.title,\n 'url': self.url,\n 'abstract': self.abstract\n }\n if self.metadata:\n obj['metadata'] = self.metadata\n return obj", "def safe_json(self, context):\n serialize_context = dict()\n for key, obj in context.items():\n if isinstance(obj.__class__, ModelBase):\n if hasattr(obj, 'serialize') and callable(getattr(obj, 'serialize')):\n serialize_context[key] = obj.serialize()\n else:\n serialize_context[key] = model_to_dict(obj)\n elif isinstance(obj, QuerySet):\n serialize_context[key] = [o.serialize() for o in obj if hasattr(o, 'serialize')]\n if len(serialize_context[key]) != len(obj):\n serialize_context[key] = [model_to_dict(o) for o in obj]\n elif key == 'extra':\n serialize_context[key] = obj\n # elif key == 'view':\n # continue\n # else:\n # serialize_context[key] = obj\n return dict(success=True, data=serialize_context)", "def jsonify(obj):\n raise NotImplementedError", "def json_converter(obj):\n if isinstance(obj, ErrorReport):\n rdict = obj.__dict__\n return rdict\n try:\n return obj.to_json()\n except AttributeError:\n return obj.__dict__", "def to_json(self) -> JSON:\n pass", 
"def to_jsondict(obj, view=''):\n return obj.to_jsondict(view) if hasattr(obj, 'to_jsondict') else obj", "def EventToJSON(_object):\n return json.dumps(_object, default=jsonDefault)", "def serialize(obj):\n\n # if isinstance(obj, date):\n # serial = obj.isoformat()\n # return serial\n #\n # if isinstance(obj, time):\n # serial = obj.isoformat()\n # return serial\n\n return obj.to_json()", "def _jsonify(data: dict):\n j = data.pop('json', None)\n if isinstance(j, dict):\n return j\n if j is None:\n for k, v in data.items():\n if isinstance(v, datetime) or isinstance(v, date):\n data[k] = arrow.get(v).isoformat()\n\n # Create json from kwargs\n j = json.dumps(data)\n return json.loads(j)", "def make_json_compatible(obj_):\n if isinstance(obj_, (numbers.Number, str, bool)) or obj_ is None:\n # these are handled as is\n return obj_\n elif isinstance(obj_, collections.Mapping):\n return {\n make_json_compatible(k): make_json_compatible(v)\n for k, v in obj_.items()\n }\n elif isinstance(obj_, (collections.Iterable, collections.Set)):\n return [make_json_compatible(v) for v in obj_]\n elif isinstance(obj_, (datetime.datetime, datetime.date)):\n return obj_.isoformat()\n\n raise NotImplementedError(\"Dont know how to handle objects of type {}\".format(type(obj_)))", "def as_json(self):\n # if we don't convert it to a dict we'll get a whole bunch of 'can't be serialized' things\n # match = self.__dict__\n # match.pop('_sa_instance_state', None)\n # for k in match:\n #\n # match['date'] = match['date'].isoformat()\n m = self.__dict__\n m['explosions'] = self.explosions.all()\n m['deaths'] = self.deaths.all()\n m['antagobjs'] = self.antagobjs.all()\n m['uplinkbuys'] = self.uplinkbuys.all()\n m['badassbuys'] = self.badassbuy.all()\n m['populationstats'] = self.populationstats.all()\n\n return dict_to_json(m)", "def to_init_json(self) -> JSON:\n pass", "def dump_json(request, obj):\n return obj", "def to_representation(self, object):\n\n finished = object.finished or None\n\n if finished is not None:\n finished = str(finished)\n\n return {\n 'id': object.id,\n 'barcode': object.barcode,\n 'started': str(object.started),\n 'finished': finished,\n 'properties': object.properties\n }", "def jsonify(object):\n # note: ng provides a \"json\" filter that can do this too\n # note: but Django doesn't [https://code.djangoproject.com/ticket/17419]\n if isinstance(object, QuerySet):\n return serialize('json', object)\n return json.dumps(object)", "def _jsonable(self):\n magic_dict = {}\n mman = self.magics_manager\n magics = mman.lsmagic()\n for key, subdict in magics.items():\n d = {}\n magic_dict[key] = d\n for name, obj in subdict.items():\n try:\n classname = obj.__self__.__class__.__name__\n except AttributeError:\n classname = 'Other'\n \n d[name] = classname\n return magic_dict", "def SerializeObject(self, data):\n\n if isinstance(data,dict):\n serializad_data = json.dumps(data)\n else:\n serializad_data = json.dumps(data.__dict__)\n\n return serializad_data", "def encode_json(obj):\n\treturn json.dumps(obj)", "def tojson(self) -> ty.Text:\n return json.dumps(self.todict())", "def toJSON(object):\n\treturn json.dumps(object, ensure_ascii=False)", "def format(self, obj):\n return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))", "def to_json(self, *args, **kwargs):\n data = self.to_dict()\n\n return json_util.dumps(data)", "def __json_light__(self):\n filtered_dict = dict()\n\n for k, item in six.iteritems(self.__dict__):\n if k.startswith('_') or k == 'annotations':\n continue\n\n if 
hasattr(item, '__json__'):\n filtered_dict[k] = item.__json__\n else:\n filtered_dict[k] = serialize_obj(item)\n\n return filtered_dict", "def __json__(self):\n filtered_dict = dict()\n\n for k, item in six.iteritems(self.__dict__):\n if k.startswith('_'):\n continue\n\n if hasattr(item, '__json__'):\n filtered_dict[k] = item.__json__\n else:\n filtered_dict[k] = serialize_obj(item)\n\n return filtered_dict", "def osl_encode2json(obj):\n\n content, bundle = osl_encode(obj, False)\n # encoding should not bundle!\n assert bundle == []\n\n return json.dumps(content)", "def to_json(self):\n obj_dict = self.__dict__\n obj_dict[\"_class_\"] = self.__class__.__name__\n return obj_dict", "def to_json(self):\n return json.dumps(self.for_json())", "def convertToJson(self, data):\n return self.__utils.convertToJson(data)", "def to_json_full(self):\n data = self.to_json()\n # TODO: Enable this once custom resource_links are supported again.\n #data['resource_links'] = [\n # r.to_json() for r in self.resource_links\n # if self.resource_links is not None\n #]\n data['commits'] = [\n c.to_json() for c in self.commits if self.commits is not None\n ]\n data['nvd'] = self.nvd._to_json_full(\n ) if self.nvd is not None else None\n data['creator'] = self.creator.to_json(\n ) if self.creator is not None else None\n data['date_created'] = self.date_created\n data['date_modified'] = self.date_modified\n\n return data", "def serialize_to_json(self, queryset):\r\n object_data = []\r\n is_queryset = False\r\n query_fields = self.get_fields()\r\n try:\r\n iter(queryset)\r\n is_queryset = True\r\n raw_data = serializers.serialize('python', queryset, fields=query_fields, use_natural_keys=self.serialize_natural_keys)\r\n except TypeError: # Not iterable\r\n raw_data = serializers.serialize('python', [queryset, ], fields=query_fields, use_natural_keys=self.serialize_natural_keys)\r\n\r\n for obj in raw_data: # Add pk to fields\r\n obj['fields']['pk'] = obj['pk']\r\n object_data.append(obj['fields'])\r\n\r\n if is_queryset:\r\n return object_data\r\n return object_data[0] # If there's only one object\r", "def make_jsons(self):\n self._jsons = [tree.to_json() for tree in self.reaction_trees]\n self._update_route_dict(self._jsons, \"json\")", "def json_serialize(self):\n raise NotImplementedError('json_serialize must be overriden')", "def toJSON(self):\n raise NotImplementedError()", "def to_json(self):\n\n result = super(Snapshot, self).to_json()\n result.update({\n 'snapshot': self.snapshot.to_json(),\n })\n return result", "def tojson_filter(obj, **kwargs):\n # https://github.com/mitsuhiko/flask/blob/master/flask/json.py\n return Markup(dumps(obj, **kwargs))", "def object_to_json(obj):\n if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):\n return obj.isoformat()\n return str(obj)", "def _to_json_dict(self, path_context, memo=None):\n return {'tag': self.tag(), 'data':\n {'meta': self._meta.save(path_context, memo=memo)}}", "def to_json(self):\n return None", "def __json__(self) -> dict[Any, Any]:\n return self.dict(\n include={\n **{k: ... 
for k in self.dict().keys() if k != \"input\"},\n \"input\": {\n \"dataset\": {\"id\"},\n \"asset\": {\"id\"},\n },\n },\n exclude={\n \"steps\": {\"__all__\": {\"id\"}},\n },\n )", "def convert_context_to_json(self, context):\n\t\t# Note: This is *EXTREMELY* naive; in reality, you'll need\n\t\t# to do much more complex handling to ensure that arbitrary\n\t\t# objects -- such as Django model instances or querysets\n\t\t# -- can be serialized as JSON.\n\t\treturn json.dumps(context)", "def testtojson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual(json.dumps([dicty]), Base.to_json_string([dicty]))", "def _toJSON(self):\n\n return json.encode(self.__toJSON())", "def _convert_to_jsonc(x):\n\n if isinstance(x, dict):\n jsonc_obj = Jsonc()\n # Recursively transform all members of the dict.\n # When converting a dict, we do not convert _name items into private\n # Jsonc members.\n for key, value in x.items():\n jsonc_obj._dict[key] = _convert_to_jsonc(value)\n return jsonc_obj\n elif isinstance(x, list):\n # Recursively transform all members of the list.\n members = []\n for item in x:\n members.append(_convert_to_jsonc(item))\n return members\n else:\n # Return the base object.\n return x", "def json(self):\n robot_dict = self.robot_dict()\n target_dict = self.target_dict()\n json_str = '{'\n json_str = json_str + '\"robot_obj\" : ' + json.dumps(robot_dict) + \",\\n\"\n json_str = json_str + '\"target_obj\" : ' + json.dumps(target_dict) + \"\\n\"\n json_str = json_str + '}'\n return(json_str)", "def to_json(self):\r\n return {'type': self.type, 'name': self.name}", "def convert_for_json(obj):\n if isinstance(obj, datetime.datetime):\n return obj.__str__()\n return obj", "def serialize(self, obj):\n return obj", "def convert_context_to_json(self, context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return json.dumps(context)", "def to_json(self, data, options=None):\r\n options = options or {}\r\n data = self.to_simple(data, options)\r\n\r\n sort_keys = settings.DEBUG\r\n\r\n if django.get_version() >= '1.5':\r\n return json.json.dumps(data, cls=json.DjangoJSONEncoder, sort_keys=sort_keys, ensure_ascii=False)\r\n else:\r\n return simplejson.dumps(data, cls=json.DjangoJSONEncoder, sort_keys=sort_keys, ensure_ascii=False)", "def jsonify(obj):\n d = model_to_dict(obj)\n return json.dumps(d, cls=LazyEncoder)", "def to_json(self):\n data = {}\n if self.name != '':\n data['name'] = self.name\n if self.address is not None:\n data['address'] = self.address.to_json()\n if self.industry_type != '':\n data['industry_type'] = self.industry_type\n if self.industry_size != '':\n data['industry_size'] = self.industry_size\n if self.fiscal_year_start_month != '':\n data['fiscal_year_start_month'] = self.fiscal_year_start_month\n if self.currency_code != '':\n data['currency_code'] = self.currency_code\n if self.time_zone != '':\n data['time_zone'] = self.time_zone\n if self.date_format != '':\n data['date_format'] = self.date_format\n if self.field_separator != '':\n data['field_separator'] = self.field_separator\n if self.language_code != '':\n data['language_code'] = self.language_code\n if self.tax_basis != '':\n data['tax_basis'] = self.tax_basis\n if self.org_address != '':\n data['org_address'] = self.org_address\n if self.remit_to_address != '':\n data['remit_to_address'] = 
self.remit_to_address\n if self.tax_type != '':\n data['tax_type'] = self.tax_type\n return data", "def jsonify(obj):\n return json.loads(json.dumps(obj, default=default_encoder))", "def to_json(self, obj):\n _dict = obj._to_dict()\n if ID not in _dict or _dict[ID] is None:\n _dict[ID] = str(uuid.uuid4())\n json_str = json.dumps(_dict, indent=4)\n return json_str", "def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__)", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def __json_encode__(self) -> Dict[str, Any]:\n return {\"figure\": self.figure, \"name\": self.name, \"metadata\": self.metadata}", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(), *args, **kwargs)", "def default(self, obj): # pylint: disable=method-hidden\n if isinstance(obj, (ArmCalcInput, ArmCalcOutput)):\n return obj.__dict__\n elif isinstance(obj, (datetime, date)):\n return to_wcf_date(obj)\n return super(ArmCalcJsonEncoder, self).default(obj)", "def to_json(self, **kwargs):\n return dumps(self, **kwargs)", "def to_json(self):\n return json.dumps(self.to_dict())", "def to_json(self):\n return json.dumps(self.to_dict())", "def toJson(self):\r\n return self.__dict__", "def to_json(self):\n return json.dumps(self._asdict())", "def dict_2_json(obj, filename):\n\twith open('data/output/' + filename, 'w') as fp:\n\t\tjson.dump(obj, fp, indent=4)", "def to_json(self):\n return json.dumps(self.dict)", "def to_json(self):\n return json.dumps(self, default=lambda i: i.__dict__)", "def to_json(self):\n related = {'people': list(set([p.n for p in self.people.all()])),\n 'places': list(set([p.reg for p in self.places.all()])),\n 'organizations': list(set([o.n for o in self.orgs.all()])),\n 'keywords': list(set([k.reg for k in self.ref_strings.all()]))}\n return {'id': self.id,\n 'date': self.date,\n 'section': self.section_type,\n 'subsection': self.subsection_type,\n 'article_type': self.article_type,\n 'xpath': self.xpath,\n 'content': self.content,\n 'related': related}", "def to_json_string(my_obj):\n return (json.dumps(my_obj))", "def to_json_string(my_obj):\n return (json.dumps(my_obj))", "def toJSON(self) -> str:\r\n try:\r\n _ = json.dumps(self.value)\r\n value = self.value\r\n except (TypeError, OverflowError):\r\n value = {}\r\n value['object_type'] = self.value.__class__.__name__\r\n if isinstance(self.value, RawData):\r\n type_str = '_{}__'.format(value['object_type'])\r\n for key, data in self.value.__dict__.items():\r\n value[key.replace(type_str, '')] = data\r\n else:\r\n type_str = '_{}'.format(value['object_type'])\r\n for key, data in self.value.__dict__.items():\r\n value[key.replace(type_str, '')] = data\r\n\r\n return json.dumps({'object_type' : 'RawData', 'version' : 
self.version,\r\n 'timestamp' : self.timestamp, 'label' : self.label,\r\n 'value' : value})", "def serialize(cls, obj):\n return json.dumps(obj, cls=CustomTypeEncoder)", "def default(self, obj):\n \n if isinstance(obj, np.ndarray):\n return list(obj)\n\n if isinstance(obj, uuid.UUID):\n return str(obj)\n\n if isinstance(obj, datetime.datetime):\n return obj.isoformat()\n \n if isinstance(obj,TPC):\n return obj._so()\n \n # No special handling called for; pass through\n return json.JSONEncoder.default(self, obj)", "def make_json(result):\n new_result = result.to_dict()\n json_result = json.dumps(new_result, indent=4)\n return json_result", "def to_json(obj: Any) -> str:\n return mark_safe(json.dumps(obj))", "def JsonComplexEncoder(obj):\n if isinstance(obj, bytes):\n return str(obj)\n else:\n return obj", "def json_serial(obj):\n if isinstance(obj, LegipyModel):\n return obj.to_json()\n elif isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()\n raise TypeError(\"Type {0} not serializable\".format(repr(type(obj))))", "def tojson(python_object):\n return json.JSONEncoder().encode(python_object)", "def to_json(self):\n new_dictionary = {}\n\n for name in self._fields.keys():\n new_dictionary[name] = getattr(self, name)\n\n return json.dumps(new_dictionary)", "def to_json(self) -> dict:\n json_dict = {\n \"id\": self.id,\n \"movement_id\": self.movement_id,\n \"poster\": self.poster.to_json(),\n \"message\": self.message,\n \"created_time\": str(self.created_time.astimezone())\n }\n\n if self.updated_time:\n json_dict[\"updated_time\"] = str(self.updated_time.astimezone())\n else:\n json_dict[\"updated_time\"] = None\n\n return json_dict", "def serialize(self):\n _json = {}\n for f in self._fields:\n # Check if index has function for custom serializing and call it, if exists\n if hasattr(self, 'prepare_%s' % f.name) and\\\n hasattr(getattr(self, 'prepare_%s' % f.name), '__call__'):\n _json[f.name] = getattr(self, 'prepare_%s' % f.name)(self.instance)\n else:\n _json[f.name] = f.serialize(self.instance)\n return _json", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(primitive=True), *args, **kwargs)" ]
[ "0.69116586", "0.6882489", "0.68524873", "0.6822507", "0.6781035", "0.66899097", "0.65613455", "0.65372014", "0.65137166", "0.647605", "0.6443874", "0.6437841", "0.64342636", "0.6407022", "0.63898367", "0.6371773", "0.6358821", "0.63180697", "0.6306601", "0.6302604", "0.630055", "0.6299254", "0.6292629", "0.6289547", "0.6285476", "0.62702996", "0.6268198", "0.6263029", "0.62411964", "0.62240183", "0.62231326", "0.61998904", "0.6192317", "0.6184339", "0.617507", "0.61721075", "0.61502725", "0.6145138", "0.6132072", "0.61213833", "0.61207664", "0.61107886", "0.6099473", "0.60968435", "0.60962015", "0.60906285", "0.60738724", "0.6072874", "0.6067394", "0.60613847", "0.60606873", "0.60507315", "0.60503274", "0.6050157", "0.6042044", "0.60410666", "0.60409987", "0.6037795", "0.602933", "0.60258895", "0.60238487", "0.6007845", "0.6004694", "0.6001976", "0.59953904", "0.59929574", "0.59929574", "0.59929574", "0.59929574", "0.59929574", "0.59929574", "0.59929574", "0.59929574", "0.59929574", "0.59929574", "0.5991856", "0.59824663", "0.5980213", "0.597688", "0.59689426", "0.59689426", "0.5967427", "0.59637344", "0.59592605", "0.5956264", "0.5950422", "0.5947234", "0.5947226", "0.5947226", "0.5944247", "0.59341115", "0.59286207", "0.5924024", "0.59213775", "0.59201515", "0.59175444", "0.5912609", "0.59104085", "0.5910295", "0.59080476", "0.59069556" ]
0.0
-1
r"""Factorized Prior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
def bmshj2018_factorized(
    quality, metric="mse", pretrained=False, progress=True, **kwargs
):
    if metric not in ("mse", "ms-ssim"):
        raise ValueError(f'Invalid metric "{metric}"')

    if quality < 1 or quality > 8:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')

    return _load_model(
        "bmshj2018-factorized", metric, quality, pretrained, progress, **kwargs
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bias_prior(self):", "def lnprior(self):\n \n return", "def prior_predictive(self):\n cfg = self.config\n n = cfg['batch_size'] * cfg['q/n_samples']\n n_samples = cfg['q/n_samples']\n with util.get_or_create_scope('model', reuse=True):\n h_prior = tf.cast(self.p_h_L.sample(n), cfg['dtype'])\n h_prior = tf.reshape(\n h_prior, [cfg['q/n_samples'], cfg['batch_size'], -1])\n h = [None] * cfg['p/n_layers']\n h[cfg['p/n_layers'] - 1] = h_prior\n for n in range(cfg['p/n_layers'] - 1, 0, -1):\n p_h_n = self.build_stochastic_layer(n, h_above=h[n])\n h[n - 1] = tf.cast(p_h_n.sample(), cfg['dtype'])\n return self.likelihood(h[0])", "def front_column_model_p_gain():", "def prior(kernel_size, bias_size): #removed dtype=None, unused argument\n number = kernel_size + bias_size\n prior_model = keras.Sequential(\n [\n tfp.layers.DistributionLambda(\n lambda t: tfp.distributions.MultivariateNormalDiag(\n loc=tf.zeros(number), scale_diag=tf.ones(number)\n )\n )\n ]\n )\n return prior_model", "def buildZPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.z_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_pzgxw + T.log(self.hyper['num_clust'])), axis=3), axis=[1,2])\r\n\r\n self.z_prior_modif = - T.maximum(self.hyper['treshold_z_prior'], - self.z_prior)", "def __init__(self, prior: Prior):\n # TODO: Consider analytical solution rather than implementing optimisation\n super().__init__(prior.factor, x=prior, name=namer(self.__class__.__name__))\n self.prior = prior\n self.label = f\"PriorFactor({prior.label})\"", "def set_prior(self,field):\n self.observation_thresholds = [i/self.observations for i in range(0,self.observations)]\n self.observation_samples = 1\n # TODO: For use after integrating image processing with MCESP for Game-Delayed Reinforcements\n # self.norm = field.max()", "def _compute_mix_prior(self):\n if np.all(self.mix_prior == 1):\n return 0\n return np.dot(np.log(self.mix_weight).T, (self.mix_prior - 1))", "def predict_proba(self):\n ...", "def setModelPrior(self,A,B,C,priorWeight):\n Cpattern = self.coeffPattern[2]\n for i in range(self.m):\n ai = A[i,:].tolist()\n bi = B[i,:].tolist()\n (xuc,constant) = self._toEstimator(i,ai,bi)\n if Cpattern == None or Cpattern[i] == None:\n xuc[-1] = C[i]\n self.estimators[i].setPrior(np.array(xuc),priorWeight)\n return", "def buildWPriorTerm(self):\r\n\r\n # self.w_prior.shape == (minibatch size,)\r\n self.w_prior = 0.5*T.sum(1 + T.log(self.qwgy_var) - self.qwgy_mu**2-self.qwgy_var, axis=1)\r\n\r\n self.w_prior_modif = - T.maximum(self.hyper['treshold_w_prior'], -self.w_prior)", "def fit_model(train_ts_dis, data, init_prior = [.5,.5], bias = True, mode = \"biasmodel\"):\r\n if mode == \"biasmodel\":\r\n #Fitting Functions\r\n def bias_fitfunc(rp, tsb, df):\r\n init_prior = [.5,.5]\r\n model = BiasPredModel(train_ts_dis, init_prior, ts_bias = tsb, recursive_prob = rp)\r\n model_likelihoods = []\r\n for i in df.index:\r\n c = df.context[i]\r\n trial_choice = df.subj_ts[i]\r\n conf = model.calc_posterior(c)\r\n model_likelihoods.append(conf[trial_choice])\r\n return np.array(model_likelihoods)\r\n \r\n def bias_errfunc(params,df):\r\n rp = params['rp']\r\n tsb = params['tsb']\r\n #minimize\r\n return abs(np.sum(np.log(bias_fitfunc(rp,tsb,df)))) #single value\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('rp', value = .6, min = 0, max = 1)\r\n if bias == True:\r\n fit_params.add('tsb', value = 1, min = 0)\r\n else:\r\n fit_params.add('tsb', value = 1, vary = False, min = 0)\r\n out 
= lmfit.minimize(bias_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(out)\r\n return out.values\r\n \r\n elif mode == \"midline\":\r\n #Fitting Functions\r\n def midline_errfunc(params,df):\r\n eps = params['eps'].value\r\n context_sgn = np.array([max(i,0) for i in df.context_sign])\r\n choice = df.subj_ts\r\n #minimize\r\n return -np.sum(np.log(abs(abs(choice - (1-context_sgn))-eps)))\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('eps', value = .1, min = 0, max = 1)\r\n midline_out = lmfit.minimize(midline_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(midline_out)\r\n return midline_out.values", "def prior(cube, ndim, nparams):\n # construct prior from recovery file\n counter = 0\n if params2 is None:\n return\n for key in params2.keys():\n nparams_tmp = int(params2[key]['nparams'])\n for ii in range(nparams_tmp):\n # sp = [name, prior type, x1, x2]\n sp =\\\n params2[key]['param'+str(ii+1)].split(',')\n if sp[1][0] == 'U' and sp[2][:5]=='param' and sp[3][:5]=='param':\n subtract1 = int(key[-1]) - int(sp[2][-1])\n subtract2 = int(key[-1]) - int(sp[3][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n cube[counter-subtract1], cube[counter-subtract2])\n elif sp[1][0] == 'U' and sp[2][:5]=='param':\n subtract = int(key[-1]) - int(sp[2][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n cube[counter-subtract], float(sp[3]))\n elif sp[1][0] == 'U' and sp[3][:5]=='param':\n subtract = int(key[-1]) - int(sp[2][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n float(sp[2]), cube[counter - subtract])\n else:\n cube[counter] = GeneralPrior(cube[counter], sp[1], float(sp[2]),\n float(sp[3]))\n counter += 1", "def _get_model_priors(self):\n if self._alpha_model_priors:\n return self._alpha_model_priors\n # sample the variables from their corresponding distributions\n params = self._get_prior_params()\n self._alpha_model_priors = self._params2probs(params)\n return self._alpha_model_priors", "def prior_model(self) -> Collection:\n return Collection(self.prior)", "def compute_log_prior(self,params: ndarray) -> float:\n ln_tE = params[0]\n ln_A0 = params[1]\n ln_deltaT = params[2]\n fbl = params[3]\n mb = params[4]\n\n # Equation (16,15,17) (note that Albrow uses \"log\" for log10)\n log10e = np.log10(np.exp(1))\n ln_pr_ln_tE = np.log(0.476) - ((log10e*ln_tE - 1.333)**2 / 0.330) + np.log(log10e)\n ln_pr_ln_A0 = np.log(0.660) - (1.289*log10e*ln_A0) + np.log(log10e)\n ln_pr_ln_deltaT = np.log(0.156) - ((log10e*ln_deltaT - 1.432)**2 / 0.458) +\\\n np.log(log10e)\n \n # Paper doesnt mention the prior used, but I assume it to be uniform\n ln_pr_fbl = uniform.logpdf(fbl,0.0,1.0)\n\n # Paper doesnr mention the prior used but I will asuumed it to be uniform\n ln_pr_mb = uniform.logpdf(mb,self.mag_min - 1.0, self.mag_max + 1.0)\n \n \n return ln_pr_fbl + ln_pr_ln_A0 + ln_pr_ln_deltaT + ln_pr_ln_tE + ln_pr_mb", "def get_prior(self):\n assert self._prior in self._priors, 'Unsupported prior! 
Check the _priors attribute for a list of priors.'\n if self._prior == 'Gaussian':\n prior = 0.5 * torch.sum(self.parameters ** 2)/self.prior_var\n elif self._prior == 'Cauchy':\n dimconst = (self.parameters.shape[0] + 1)/2.\n prior = dimconst*torch.log(self.prior_var + torch.sum(self.parameters ** 2))\n elif self._prior == 'Sparse':\n n = self.dataset.shape[1]\n gauss_prior = 0.5 * torch.sum(torch.exp(self.parameters[-1] * torch.exp(self.parameters[n:2*n]) * self.parameters[:n] ** 2))\n gamma_density = torch.distributions.Gamma(1.5,0.5)\n# gamma_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n# lambda_density = torch.distributions.Gamma(1.5,0.5)\n lambda_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n prior = gauss_prior + lambda_prior\n return prior", "def set_prior_priorunc_synthetic(self):\n\n lai_coeff_absunc = None\n statevec_absunc = None\n\n #-- \n if self.prior_inifile!=None:\n lai_coeff_absunc, statevec_absunc = self._setprior_from_inifile()\n elif self.use_generic_prior:\n self._setprior_generic_agriculture()\n statevec_absunc = self.generic_prior_unc\n else:\n #-- overall number of time-points in schedule\n npts = self.get_npts()\n\n #-- default prior file\n prior_file = os.path.join(ipt_dir_path, 'mni_stat_jules_2017.csv')\n\n #-- get signature simulator default state\n msg = \"START reading state variables from file ***{}***...\".format(prior_file)\n FileLogger.info(msg)\n state_inst = sv.get_state_csv(fname=prior_file, fmt='%Y-%m-%d %H:%M:%S' )\n msg = \"...reading DONE\"\n FileLogger.info(msg)\n\n #-- LAI,Canopy-Height,Soil-Moisture\n self.prstate = np.empty((3,npts), dtype=np.float64)\n\n for i,date_utc in enumerate(self.schedule_dct['date_utc']):\n idx, timedelt = sv.find_nearest_date_idx(state_inst.date_utc, date_utc)\n # print \"MVMV::nearest={} idx={} timedelt={}\".format(\n # state_inst.date_utc[idx], idx, timedelt)\n #-- LAI\n self.prstate[0,i] = state_inst.lai[idx]\n #-- canopy-height\n self.prstate[1,i] = state_inst.can_height[idx]\n #-- SM\n self.prstate[2,i] = state_inst.soil_moisture[idx]\n\n #-- set uncertainty values\n self._set_priorunc(statevec_absunc=statevec_absunc, lai_coeff_absunc=lai_coeff_absunc)", "def buildConditionalPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.conditional_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_qxgy.dimshuffle(0,'x',1,'x') - self.log_pxgzw), axis=3), axis=[1,2])", "def prior(store):\n mu = zeros(store['beta'].shape[0])\n Prec = diag(0.005 * ones(store['beta'].shape[0]))\n return -0.5 * dot(store['beta'].transpose(), dot(Prec, store['beta']))", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def lnprior(params):\n a, b, f = params\n if -10.0 < b < 0. and 0. < a < 10 and 0. 
< f:\n return 0.0\n\n return -np.inf", "def prior(\n self,\n next_state: np.ndarray,\n state: np.ndarray,\n control_x: Optional[np.ndarray] = None\n ) -> np.ndarray:\n pass", "def E_step_precompute(self, model_params, my_suff_stat, my_data):", "def log_prior(self, params):", "def add_prior(self, prior):\n if self.rate_variation:\n # Gamma prior with mean 1 over all mutation rates\n sub_prior = ET.SubElement(prior, \"prior\", {\"id\":\"featureClockRatePrior.s:%s\" % self.name, \"name\":\"distribution\"})\n compound = ET.SubElement(sub_prior, \"input\", {\"id\":\"featureClockRateCompound:%s\" % self.name, \"spec\":\"beast.core.parameter.CompoundValuable\", \"name\":\"x\"})\n plate = ET.SubElement(compound, \"plate\", {\n \"var\":\"feature\",\n \"range\":\",\".join(self.features)})\n ET.SubElement(plate, \"var\", {\n \"idref\":\"featureClockRate:%s:$(feature)\" % self.name})\n gamma = ET.SubElement(sub_prior, \"input\", {\"id\":\"featureClockRatePriorGamma:%s\" % self.name, \"spec\":\"beast.math.distributions.Gamma\", \"name\":\"distr\", \"alpha\":\"@featureClockRateGammaShape:%s\" % self.name, \"beta\":\"@featureClockRateGammaScale:%s\" % self.name})\n # Exponential hyperprior on scale of Gamma prior\n # Exponential prior favours small scales over large scales, i.e. less rate variation\n # Mean scale 0.23 chosen for general sensibility, e.g.:\n # - Prior distribution is roughly 50/50 that ratio of fastest\n # to slowest feature rate in a dataset of size 200 is below\n # or above 10.\n # - Prior probability of roughly 0.90 that this ratio is below\n # 100.\n sub_prior = ET.SubElement(prior, \"prior\", {\"id\":\"featureClockRateGammaScalePrior.s:%s\" % self.name, \"name\":\"distribution\", \"x\":\"@featureClockRateGammaScale:%s\" % self.name})\n ET.SubElement(sub_prior, \"Exponential\", {\"id\":\"featureClockRateGammaShapePriorExponential.s:%s\" % self.name, \"mean\":\"0.23\", \"name\":\"distr\"})", "def prior_sample(self):\n pass", "def setupMixedPrior(self):\n\n if self.namePrior.find('mixed') < 0:\n return\n\n # we set up the default parameters for bounded flat prior,\n # then update them with non-flat examples\n if np.size(self.hyper) < 7:\n self.setupDefaultPars()\n\n # Adjust the hyperparameters for defaults.\n self.hyper[0][2] = 0.45\n self.hyper[1][2] = 0.05\n self.hyper[0][3] = 16.3\n self.hyper[1][3] = 0.1\n\n nMeths = np.shape(self.hyper)[-1]\n self.mixedNames = ['binaryBoundedOne' for i in range(nMeths)]\n\n ### Let's try some gaussians. 
Eccentricity and period\n self.mixedNames[2] = 'gaussianOne'\n self.mixedNames[3] = 'gaussianOne'\n\n self.findMixedMethods()", "def analysis(self) -> \"PriorFactor\":\n return self", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # return predicted labels of development set\n #\n ### len(train_set) 8000, len(dev) = 5000 P(pos) = 0.8 \n #### 0.55, 4.0, 0.30 ----------- 0.766\n #### 0.25 3.5 0.3 -------------- 0.766\n print(pos_prior)\n smoothing_parameter = 3.5\n pos_total_word = 0\n neg_total_word = 0\n pos_word_dict = {}\n neg_word_dict = {}\n dicts = [neg_word_dict, pos_word_dict]\n for i, sentence in enumerate(train_set):\n\n if train_labels[i] == 1: # positive reviews\n for word in sentence:\n pos_total_word += 1 \n if word in stop_words:\n continue\n if word in pos_word_dict:\n pos_word_dict[word] += 1\n else :\n pos_word_dict[word] = 1\n\n else: # negative reviews\n for word in sentence:\n neg_total_word += 1 \n if word in stop_words:\n continue\n if word in neg_word_dict:\n neg_word_dict[word] += 1\n else :\n neg_word_dict[word] = 1\n\n\n prob = {}\n denominator_pos = pos_total_word + smoothing_parameter * (len(pos_word_dict) + 1)\n denominator_neg = neg_total_word + smoothing_parameter * (len(neg_word_dict) + 1)\n de = [denominator_neg, denominator_pos]\n\n for t, dictionary in enumerate(dicts):\n for key, value in dictionary.items():\n if key not in prob:\n prob[key] = {0 : 0, 1 : 0}\n if smoothing_parameter != 0:\n prob[key][1 - t] = -1 * np.log(smoothing_parameter / de[t]) \n # print(prob[key][1 - t])\n\n prob[key][t] = -1 * np.log((value + smoothing_parameter) / de[t]) \n \n\n revised_prob = {}\n for key, value in prob.items():\n if np.abs(value[0] - value[1]) >= 0.25:\n revised_prob[key] = value \n\n print(len(revised_prob))\n\n dev_labels = []\n num_0 = 0\n for i, sentence in enumerate(dev_set):\n pos_odd = -1 * np.log(pos_prior)\n neg_odd = -1 * np.log(1.0 - pos_prior)\n for word in sentence:\n if word in revised_prob:\n pos_odd += revised_prob[word][1]\n neg_odd += revised_prob[word][0]\n \n if pos_odd > neg_odd:\n num_0 += 1\n dev_labels.append(1 if pos_odd <= neg_odd else 0)\n print(num_0)\n\n \n #### bigram model \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return dev_labels", "def model_prem(r):\n\n\t#- normalised radius\n\tx = r / 6371000.0\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\t#- upper crust\n\tif (r >= 6356000.0):\n\t\trho = 2.6\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 3.2\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- lower crust\n\telif (r >= 6346000.6) & (r < 6356000.0):\n\t\trho = 2.9\n\t\tvpv = 6.8\n\t\tvph = vpv\n\t\tvsv = 3.9\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- LID\n\telif (r >= 6291000.0) & (r < 6346000.6):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- LVZ\n\telif (r >= 6151000.0) & (r < 6291000.0):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- Transition zone 1\n\telif (r >= 5971000.0) & (r < 6151000.0):\n\t\trho = 7.1089 - 3.8045 * x\n\t\tvpv = 20.3926 - 12.2569 * x\n\t\tvph = vpv\n\t\tvsv = 8.9496 - 4.4597 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 2\n\telif (r >= 5771000.0) & (r < 5971000.0):\n\t\trho = 11.2494 - 8.0298 * 
x\n\t\tvpv = 39.7027 - 32.6166 * x\n\t\tvph = vpv\n\t\tvsv = 22.3512 - 18.5856 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 3\n\telif (r >= 5701000.0) & (r < 5771000.0):\n\t\trho = 5.3197 - 1.4836 * x\n\t\tvpv = 19.0957 - 9.8672 * x\n\t\tvph = vpv\n\t\tvsv = 9.9839 - 4.9324 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 1\n\telif (r >= 5600000.0) & (r < 5701000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 29.2766 - 23.6027 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 22.3459 - 17.2473 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- Lower mantle 2\n\telif (r >= 3630000.0) & (r < 5600000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 24.9520 - 40.4673 * x + 51.4832 * x**2 - 26.6419 * x**3\n\t\tvph = vpv\n\t\tvsv = 11.1671 - 13.7818 * x + 17.4575 * x**2 - 9.2777 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 3\n\telif (r >= 3480000.0) & (r < 3630000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 15.3891 - 5.3181 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 6.9254 + 1.4672 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Outer core\n\telif (r >= 1221000.5) & (r < 3480000.0):\n\t\trho = 12.5815 - 1.2638 * x - 3.6426 * x**2 - 5.5281 * x**3\n\t\tvpv = 11.0487 - 4.0362 * x + 4.8023 * x**2 - 13.5732 * x**3\n\t\tvph = vpv\n\t\tvsv = 0.0\n\t\tvsh = 0.0\n\t\teta = 1.0\n\n\t#- Inner Core\n\telif (r >= 0.0) & (r < 1221000.5):\n\t\trho = 13.0885 - 8.8381 * x**2\n\t\tvpv = 11.2622 - 6.3640 * x**2\n\t\tvph = vpv\n\t\tvsv = 3.6678 - 4.4475 * x**2\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N", "def _model(self):\n common_scale = self.edp_par['common_scale'].value\n model = self.F_trans() * self.F_cont()\n # get F(h=1,k=0), which is used for normalization \n # common_scale is a common scaling factor => F(h=1,k=0) = 100*common_scale\n F_10 = model[(self.h==1)&(self.k==0)]\n model = model / np.absolute(F_10) * 100 * common_scale\n return model", "def set_prior_priorunc_general(self):\n\n #-- some configurations apply absolute uncertainties\n lai_coeff_absunc = None\n statevec_absunc = None\n is_generic_prior = False\n\n #--\n if self.prior_states_file!=None:\n states_file = self.prior_states_file\n basename = os.path.basename(states_file)\n if os.path.splitext(basename)[1]=='.nc':\n msg = \"Prior state information will be read from ***{}***\".format(states_file)\n FileLogger.info(msg)\n self._setprior_jules(states_file)\n msg = \"...reading prior DONE\"\n FileLogger.info(msg)\n elif os.path.splitext(basename)[1]=='.csv':\n msg = \"Prior state information will be read from ***{}***\".format(states_file)\n FileLogger.info(msg)\n self._setprior_csv(states_file)\n msg = \"...reading prior DONE\"\n FileLogger.info(msg)\n else:\n msg = \"Unrecognised format of states file ***{}***. 
Cannot continue!\".format(\n states_file)\n FileLogger.fatal(msg)\n raise RuntimeError(msg)\n return\n elif self.prior_inifile!=None:\n lai_coeff_absunc, statevec_absunc = self._setprior_from_inifile()\n else:\n self._setprior_generic_agriculture()\n is_generic_prior = True\n statevec_absunc = self.generic_prior_unc\n\n #-- set uncertainty values\n self._set_priorunc( lai_coeff_absunc=lai_coeff_absunc,\n statevec_absunc=statevec_absunc,\n is_generic_prior=is_generic_prior )", "def __init__(self,data,pos,neg,bk,prior,target,max_depth=2):\n\n self.data = data\n self.examples = {}\n self.pos = pos\n self.neg = neg\n #initial model assumed P(target|data)=0.5 to target=1,0\n for ex in pos+neg:\n self.examples[ex] = prior[ex]\n self.bk = bk\n self.target = target\n self.max_depth = max_depth\n self.boosted_trees = []", "def prep(self):\n \n # create a dict with prior probabilities\n self.row_priors = [0.0]*len(self.rows)\n self.feature_priors = dict()\n \n # denominator is given by reference priors\n denominator = sum(self.column_priors)\n # null_feature_prior is used when feature is not observed at all\n # this is set up to scale with features, i.e. arbitrarily adding\n # child features into an ontology should not skew sums over repr.\n null_feature_prior = 1/max(denominator, float(len(self.rows)))\n \n for rowname, rowindex in self.rows.items(): \n numerator = 0\n for colname, colindex in self.columns.items(): \n colprior = self.column_priors[colindex]\n numerator += self.data[colindex][rowindex]*colprior\n if numerator == 0:\n numerator = null_feature_prior \n self.row_priors[rowindex] = float(numerator)/denominator\n self.feature_priors[rowname] = self.row_priors[rowindex]\n\n return self", "def _updateInitialProbabilities(self): \n N = self.N\n K = self.K\n\n for i in range(1,self.K+1):\n s = 0\n updated_prob = 0\n for n in range(1,self.N+1):\n s = s+1\n updated_prob = updated_prob + self.posterior_state_trellis[n][(1,i)]\n self.state_initial_prob[i] = (updated_prob/s)", "def bmshj2018_hyperprior(\n quality, metric=\"mse\", pretrained=False, progress=True, **kwargs\n):\n if metric not in (\"mse\", \"ms-ssim\"):\n raise ValueError(f'Invalid metric \"{metric}\"')\n\n if quality < 1 or quality > 8:\n raise ValueError(f'Invalid quality \"{quality}\", should be between (1, 8)')\n\n return _load_model(\n \"bmshj2018-hyperprior\", metric, quality, pretrained, progress, **kwargs\n )", "def eval_prior(self, state, action):\n\n return np.dot(state, self.a.T) + np.dot(action, self.b.T)", "def lfads_decode_prior(params, lfads_hps, key, z_sample):\n\n ib, g0, ii_txi = decompose_sample(lfads_hps, z_sample)\n ib = np.where(lfads_hps['do_tanh_latents'], np.tanh(ib), ib) \n g0 = np.where(lfads_hps['do_tanh_latents'], np.tanh(g0), g0)\n ii0 = params['ii0']\n ii0 = np.where(lfads_hps['do_tanh_latents'], np.tanh(ii0), ii0)\n # ii tanh'd at the decode loop to keep prior routines similar to inference.\n\n # Since the factors feed back to the controller,\n # factors_{t-1} -> controller_t -> sample_t -> generator_t -> factors_t\n # is really one big loop and therefor one RNN.\n f0 = params['f0'] \n\n # Make all the randomness for all T steps at once, it's more efficient.\n # The random keys get passed into scan along with the input, so the input\n # becomes of a 2-tuple (keys, actual input).\n T = ii_txi.shape[0]\n keys = random.split(key, 2)\n keys_t = random.split(keys[0], T)\n\n state0 = (ii0, ib, g0, f0)\n decoder = partial(lfads_decode_prior_one_step_scan, *(params, lfads_hps))\n _, state_and_returns_t = 
lax.scan(decoder, state0, (keys_t, ii_txi))\n\n g_t, f_t, ii_t, ib, lograte_t = state_and_returns_t\n return (g_t, f_t, ii_t, ib, lograte_t, g0, ii0)", "def predict(self, u=0):\n\n self.predict_x(u)\n \n W = np.concatenate((dot(self.F, self.U), self.Uq), axis=1)\n D = np.concatenate((self.D, self.Dq))\n self.U, self.D = mwgs(W, D)\n\n # save prior\n self.x_prior = np.copy(self.x)\n self.U_prior = np.copy(self.U)\n self.D_prior = np.copy(self.D)", "def boosting(train_data, dim, t):\n w = []\n w.append([float(1) / float(len(train_data))] * len(train_data))\n\n # Store models in m, models are stored as a tuple with the w_vector as well\n # as the t_vector\n\n m = []\n\n for i in range(t):\n print(\"Iteration \" + str(i + 1) + str(\":\"))\n t_vec, w_vec, error = binary_classifier(train_data, dim, w[i])\n alpha = 0.5 * math.log(float(1 - error) / float(error))\n print(\"Error = \" + str(error))\n print(\"Alpha = \" + str(alpha))\n if error >= 0.5:\n break\n # Add model only if it has error rate less than 0.5\n m.append((t_vec, w_vec, alpha))\n\n is_increase_weights_printed = False\n is_decrease_weights_printed = False\n factor_to_increase = 0\n factor_to_decrease = 0\n # Update weights by figuring out which points that are misclassified\n w.append([0] * len(train_data))\n for j in range(len(train_data)):\n if np.dot(train_data[j][0:dim], w_vec) > t_vec:\n if train_data[j][dim] == -1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n else:\n if train_data[j][dim] == 1:\n # misclassified\n w[i + 1][j] = float(w[i][j]) / float(2 * error)\n if not is_increase_weights_printed:\n factor_to_increase = float(1) / float(2 * error)\n is_increase_weights_printed = True\n else:\n # correctly classified\n w[i + 1][j] = float(w[i][j]) / float(2 * (1 - error))\n if not is_decrease_weights_printed:\n factor_to_decrease = float(1) / float(2 * (1 - error))\n is_decrease_weights_printed = True\n\n print(\"Factor to increase weights = \" + str(factor_to_increase))\n print(\"Factor to decrease weights = \" + str(factor_to_decrease))\n\n return m", "def prior(training_data, label_list):\n\n smooth = 1 # smoothing factor\n logprob = {}\n # TODO: add your code here\n numfile1 = 0\n numfile2 = 0\n for dic in training_data:\n if(dic[\"label\"] == label_list[0]):\n numfile1 += 1\n elif(dic[\"label\"] == label_list[1]):\n numfile2 += 1\n numtotal = numfile1 + numfile2\n\n prob1 = (numfile1+smooth)/(numtotal+2)\n prob2 = (numfile2 + smooth) / (numtotal + 2)\n\n logprob[label_list[0]] = math.log(prob1)\n logprob[label_list[1]] = math.log(prob2)\n\n\n return logprob", "def proba_redefined_predict(model,X,weigh,classes=string.ascii_lowercase):\n\n y_proba=model.predict_proba(X)\n tuned=renorm(y_proba,weigh)\n y_max_arg=tuned.argmax(axis=1)\n predict=to_class(y_max_arg,classes)\n \n return predict", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, 
train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def prior(self, c, labeled):\n return log(len(labeled[c])/self.N_features)", "def prediccion(self):\n # Project the state ahead\n self.X = self.F @ self.X + self.B @ self.M\n self.P = self.F @ self.P @ self.F.T + self.Q\n\n return self.X", "def _screener_init(self):\n subject = 0\n self.model.uu.remove(subject) # selects from untested and performs e\n self.model.tt.append(subject)\n self.model.b -= (self.cz + self.cy) # update budget\n self.model.z[subject] = self.z[subject] # update model\n self.model.y[subject] = self.y[subject]", "def 
predict_proba(self, X):\n proba = np.ones((X.shape[0], 2), dtype=np.float64)\n proba[:, 1] = super(FMClassifier, self).predict(X)\n proba[:, 0] -= proba[:, 1]\n return proba", "def prior(old_params,params):\n \n for s in range(len(params)):\n if params[s] < 0.0 or params[s] > 2:\n return 0\n return 1", "def prior(mu):\n p = np.ones(len(mu))/(mu.max()-mu.min())\n return p", "def process_prior(tmp, model_num):\n\n if re_prior_const.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.constant, value=float(tmp[1]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n\n elif re_prior_normal.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.normal, mean=float(tmp[1]), variance=float(tmp[2]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n\n elif re_prior_uni.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.uniform, lower_bound=float(tmp[1]), upper_bound=float(tmp[2]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n\n elif re_prior_logn.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.lognormal, mu=float(tmp[1]), sigma=float(tmp[2]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n elif re_prior_gamma.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.gamma, shape=float(tmp[1]), scale=float(tmp[2]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n else:\n sys.exit(\"\\nSupplied parameter prior %s unsupported\" % tmp[0])\n\n return prior_params", "def priorProbabilities():\r\n\ttotal = 0.0\r\n\tpos = 0.0\r\n\tneg = 0.0\r\n\r\n\t# Count the amount of positives and negatives in the training data\r\n\tfor item in trainingData:\r\n\t\ttotal += 1\r\n\t\tif item[1] == '0':\r\n\t\t\tpos +=1\r\n\t\tif item[1] == '1':\r\n\t\t\tneg +=1\r\n\t\t\t\r\n\t\t\t\r\n\t# Return the positive and negative probabilities \r\n\tposProb = float(pos / total * 100)\r\n\tnegProb = float(neg / total * 100)\r\n\r\n\t\r\n\t\r\n\treturn posProb, negProb", "def __init__(self):\n self.clf = DummyClassifier(strategy='most_frequent')", "def NMF(model, maxIter=100, beliefs=None, verbose=False):\n if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]\n \n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter 0: \"+str(lnZ))\n\n for t in xrange(1,maxIter+1): # for each iteration:\n # Update all the beliefs via coordinate ascent:\n for Xi in model.X: # for each variable, \n bNew = 0.0 # compute E[ log f ] as a function of Xi:\n for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:\n m = f.log() # E[log f_a] = \\sum \\log f_a \\prod b_v\n for v in f.vars - [Xi]: m *= beliefs[v]\n bNew += m.marginal([Xi]) # sum them up to get E[log f]\n bNew -= bNew.max() # (numerical issues)\n bNew = bNew.exp()\n bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z\n beliefs[Xi] = bNew\n #\n # Compute the lower bound on the partition function:\n # E_b [ log f ] + H(b) = \\sum_a E[log f_a] + \\sum_i H(b_i) for independent beliefs\n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for 
v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter \"+str(t)+\": \"+str(lnZ))\n return lnZ,beliefs", "def predict(self, x):\n (N,D) = x.shape\n k1 = np.matmul(x, np.transpose(self.w)) + self.b\n dr = (1 + np.exp(-1*k1))\n nr = 1.0\n f_x = nr / dr\n f_x1 = f_x.reshape((N,))\n y1 = np.zeros(N)\n for i in range(N):\n if f_x1[i] > 0.5:\n y1[i] = 1\n else:\n y1[i] = -1\n\n\n y2 = y1.astype(int)\n return(y2)\n\n\n #raise NotImplementedError", "def prior_sample(self, bn):\n x = np.zeros(3)\n\n # first joint prob\n random_choice = np.random.choice(bn[0], 1, bn[0].all(), bn[0])\n x[0] = random_choice[0]\n\n # Second Joint Prob\n if x[0] == 0.1:\n random_choice = np.random.choice(bn[1][0], 1, bn[1][0].all(), bn[1][0])\n x[1] = random_choice\n elif x[0] == 0.9:\n random_choice = np.random.choice(bn[1][1], 1, bn[1][1].all(), bn[1][1])\n x[1] = random_choice\n\n # Third Joint Prob\n if random_choice[0] == 0.8 or random_choice == 0.1:\n random_choice = np.random.choice(bn[2][0], 1, bn[2][0].all(), bn[2][0])\n x[2] = random_choice\n else:\n random_choice = np.random.choice(bn[2][1], 1, bn[2][1].all(), bn[2][1])\n x[2] = random_choice\n return x", "def setPrior(self,xPrior,priorWeight):\n assert self.regularizationLambda == 0\n if not isinstance(xPrior,np.ndarray):\n xPrior = np.array(xPrior)\n self.count = 1\n self.sumWeight = priorWeight\n self.scale = 1\n self.AtA = np.eye(self.n)*priorWeight\n self.AtAinv = np.eye(self.n)/priorWeight\n self.Atb = xPrior*priorWeight\n self.btb = np.dot(xPrior,xPrior)*priorWeight\n self.degenerate = False\n self.x = xPrior", "def __init__(self,model,alpha=0,head_min=0,head_max=1,k=1,\r\n variables=[],priors=[]):\r\n \r\n import numpy as np\r\n \r\n # Append the base to the elementlist\r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n # Set orientation value\r\n self.alpha = alpha\r\n \r\n # Set potential scaling variables\r\n self.head_min = head_min\r\n self.head_max = head_max\r\n \r\n # Assign the hydraulic conductivity of the base model\r\n self.k = k\r\n \r\n # The model requires the base flow in terms of hydraulic potential (phi)\r\n # The function head_to_potential extracts the following variables:\r\n # phi_min hydraulic potential corresponding to head_min\r\n # phi_max hydraulic potential corresponding to head_max\r\n self.head_to_potential()\r\n \r\n # Check input for validity\r\n self.check_input()\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.variables += [var]\r\n self.model.priors += [self.priors[idx]]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def training_forward(self, x):\n if self.project_parameters.loss_function == 'BCELoss':\n return self.activation_function(self.backbone_model(x))\n elif self.project_parameters.loss_function == 'CrossEntropyLoss':\n return self.backbone_model(x)", "def P_prior(self):\n return dot(self.U_prior, dot(diag(self.D_prior), self.U_prior.T))", "def _call(self, x):\n if functional.prior is None:\n return np.exp(x)\n else:\n return functional.prior * np.exp(x)", "def _pre_fit(self):\n pass", "def _call(self, x):\n if functional.prior is None:\n return 1.0 / (1 - x)\n else:\n return functional.prior / (1 - x)", "def 
try_latent_topics_intro_model(k):\n highest_f1 = 0\n print \"start time: {}\".format(datetime.now())\n print \"using {} latent topics\".format(k)\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n prep.prepare(n_components=k, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl')\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features = topic_features\n X_train, y_train = prep.subset(features)\n print \"regular data prep complete\"\n print topic_features\n\n\n rf = RandomForestClassifier()\n gb = GradientBoostingClassifier()\n\n mc = ModelChooser([rf, gb])\n mc.fit_predict(X_train, y_train)\n mc.print_results()\n\n for i, score in enumerate(mc.f1_scores):\n if score > highest_f1:\n highest_f1 = score\n best_n_latent_features = k\n if i == 0:\n best_model_type = \"Random Forest\"\n else:\n best_model_type = \"Gradient Booster\"\n\n\n print \"end time: {}\".format(datetime.now())\n print \"-\"*10\n results = \"f1 score was {} with {} latent features on {} model\".format(highest_f1, best_n_latent_features, best_model_type)\n print results\n return results", "def prior(X, ls, kernel_func=rbf,\n ridge_factor=1e-3, name=None):\n X = tf.convert_to_tensor(X, dtype=tf.float32)\n N, _ = X.shape.as_list()\n\n K_mat = kernel_func(X, ls=ls, ridge_factor=ridge_factor)\n\n return ed.MultivariateNormalTriL(loc=tf.zeros(N, dtype=tf.float32),\n scale_tril=tf.cholesky(K_mat),\n name=name)", "def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor:\n # The EfficientFormerClsHead doesn't have other module, just return\n # after unpacking.\n return feats[-1]", "def prior_z(self) -> distributions.Distribution:\n return distributions.Categorical(self.pi)", "def priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions", "def proba_redefined_predict(model,X,weigh):\n\n y_proba=model.predict_proba(X)\n tuned=renorm(y_proba,weigh)\n y_max_arg=tuned.argmax(axis=1)\n predict=to_class(y_max_arg,model.classes_)\n\n return predict", "def get_prior(self):\n return self.prior", "def log_prior(self):\n raise NotImplementedError(\"the log_prior property should \"\n \"be defined in the Estimator sub-class\")", "def get_fwhm_prior(self,freq,feed):\n self.fitted_fwhm = {feed:np.poly1d(fit) for feed,fit in Data.average_beam_widths.items()}\n\n\n if (self.fwhm_prior == 'ModelFWHMPrior'):\n P0_priors={'sigx':{'mean':self.xfwhm(freq)/60./2.355,\n 'width':self.xfwhm(freq)/60./2.355/1e2}}\n elif (self.fwhm_prior == 'DataFWHMPrior'):\n P0_priors={'sigx':{'mean':self.fitted_fwhm[feed](1./freq)/60./2.355,\n 'width':self.fitted_fwhm[feed](1./freq)/60./2.355/1e2}}\n else:\n P0_priors = {}\n\n return P0_priors", "def __init__(self, pseudocount=0):\n self._model = dict()\n self._alpha = pseudocount\n self._N = 0\n self._V = 0", "def fit_recurrent(self, x, y):\n # print('Stage 1')\n x_ = self.scaler_s1.fit_transform(x)\n\n self.basemodel.fit(x_, y)\n self.training_hit_probability = self._hitprobability(x_, y)\n\n # Learn the hit probability\n self.hitproba = HitProbability()\n self.hitproba.fit(x_, self.training_hit_probability)\n\n # Learn high confidence for all classes\n hm_y, auto_gamma = self._adjust_gamma(self.training_hit_probability)\n self.joint_class_hc = HC_LR()\n self.joint_class_hc.fit(x_, hm_y)\n\n # hm_subtypes = []\n # proba_subtypes = []\n\n # while np.mean(y_) > 0.01:\n # for label in np.unique(y):\n\n hm_1hot = []\n 
hm_1hot.append(self._one_hot(self.training_hit_probability, y)[0])\n y_ = y.copy()\n\n self.recurrent_base = []\n self.recurrent_hpc = []\n for ii in range(self.recurrent_modes):\n print('Stage 1 iter: ' + str(ii))\n #self.recurrent_base.append(BaseSvc())\n\n if np.sum(y_) > 2:\n self.basemodel = BaseSvc()\n hm_y, proba_tmp = self._fit_mode(x_, y_)\n hm_candidate = self._one_hot(proba_tmp, y_)[1]\n else:\n hm_candidate = np.zeros_like(y_)\n\n self.recurrent_base.append(self.basemodel)\n\n #if np.sum(hm_candidate) >= 2:\n hm_1hot.append(hm_candidate)\n\n # remove the selected subgroup from the target list\n y_[hm_1hot[-1] == 1] = 0\n\n # make the default base model the first\n self.basemodel = self.recurrent_base[0]\n\n print('Stage 2')\n # Stage 2\n # hm_1hot = hm_subtypes\n # train stage2\n self.confidencemodel.fit(x_, hm_1hot)", "def get_model( ):\n\n return Lasso(alpha = 1e-3, fit_intercept = True, precompute = True, max_iter = 1e4)", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def cv_reweighting(run):\n np.random.seed((run ** 5 + 1323002) % 123123) # np.random.seed() alternatively\n \n\n Xtr, Str, Xts, Yts = data_cache[dset]\n X_train, X_val, y_train, y_val = train_test_split(Xtr, Str, test_size=prop)\n\n # clf1 is the first classifier while clf2 is the second\n if dset == 2:\n clf1 = svm.SVC(C=2.5, gamma=0.000225, probability=True, max_iter=max_itera)\n else:\n clf1 = svm.SVC(gamma = 'scale',probability=True, max_iter=max_itera)\n if run == 1:\n print(\"learn initial probability dset:\", dset)\n clf1.fit(X_train, y_train)\n return clf1.score(Xts, Yts)\n if run == 1:\n print(\"calculating weighting dset:\", dset)\n\n probS = clf1.predict_proba(X_train)\n weights = estimateBeta(y_train, probS, 0.2, 0.4)\n\n for i in range(len(weights)):\n if weights[i] < 0:\n weights[i] = 0.0\n if run == 1:\n print(\"fit final model dset:\", dset)\n if dset == 2:\n clf2 = svm.SVC(gamma=0.000225, C=0.8, max_iter=max_itera)\n else:\n clf2 = svm.SVC(gamma=0.00865, C=.4, max_iter=max_itera)\n\n clf2.fit(X_train, y_train, sample_weight=weights)\n\n return clf2.score(Xts, Yts)", "def rescale_prior(predictionmatrix, priorprobs):\n\tpriorprobs = np.trim_zeros(priorprobs, 'f')\n\tN, C = np.shape(predictionmatrix)\n\tassert C == np.size(priorprobs)\n\tpriorprobs = priorprobs / np.sum(priorprobs).astype(float)\n\taverageprediction = np.mean(predictionmatrix, axis = 0)\n\tfactor = priorprobs / averageprediction\n\tresult = predictionmatrix * np.repeat(factor[None,:],N,0)\n\treturn normalize_probabilities(result)", "def _construct_sample_from_prior(self):\n z_sym = T.matrix()\n x_sym = T.matrix()\n irs = self.ir_steps\n oputs = [self.obs_transform(self.s0)]\n oputs.extend([self.obs_transform(self.si[i]) for i in range(irs)])\n _, hi_zmuv = self._construct_zmuv_samples(x_sym, 1)\n sample_func = theano.function(inputs=[z_sym, x_sym], outputs=oputs, \\\n givens={ self.z: z_sym, \\\n self.x_in: T.zeros_like(x_sym), \\\n self.x_out: T.zeros_like(x_sym), \\\n self.hi_zmuv: hi_zmuv }, \\\n updates=self.scan_updates)\n def prior_sampler(samp_count):\n x_samps = to_fX( np.zeros((samp_count, self.obs_dim)) )\n old_switch = self.train_switch.get_value(borrow=False)\n # set model to generation mode\n self.set_train_switch(switch_val=0.0)\n z_samps = to_fX( npr.randn(samp_count, self.z_dim) )\n model_samps = sample_func(z_samps, x_samps)\n # set model back to either training or generation mode\n 
self.set_train_switch(switch_val=old_switch)\n return model_samps\n return prior_sampler", "def model_train(x_train, y_train):\n\n global dic\n K = y_train.shape[1] ###10类\n model=[]##保存模型\n for k in range(K):\n data=x_train[y_train[:,k]==1]\n D,N=data.shape##60000,784\n print (D,N)\n\n pai=np.ones(K)/K\n Q=30\n bias=np.exp(-700)\n ##hidden variable Q*1\n # Z=np.array(np.random.normal(loc=0,scale=0.1,size=Q).reshape([Q,1]))##对于隐变量\n ##mean N*1\n miu=np.array([np.mean(data,axis=0)]*K).reshape(K,N,1)\n ##Factor Loading W N*Q\n scale = np.power(np.linalg.det(np.cov(data)), (1 / N))\n W = np.array(np.random.randn(K,N,Q))*np.sqrt(scale/Q)\n W_and_miu_new=np.array(np.zeros(shape=[K,N,Q+1]))\n # for k in range(K):\n # W_and_miu_new[k] = np.column_stack((W[k], miu[k]))\n ##variance D\n psi=np.diag(np.cov(data,rowvar=False))+bias\n print ('dasas',psi.shape)#####维度为(100,)\n ##Beta K##\n beta=np.zeros(shape=[K,Q,N])\n smooth = 0.1 * np.eye(100, M=None, k=0)\n # print (beta)\n const=(2*np.pi)**(-D/2)\n\n # print (scale)\n newloglikelyhood=0\n oldloglikelyhood=1001\n Ez_w_x=np.zeros(shape=[D,K,Q,1])#####60000*10*Q\n Ezz_w_x=np.zeros(shape=[D,K,Q,Q])####Q*10*Q\n Ez_w_x_2 = np.zeros(shape=[D, K, Q+1, 1])\n Ezz_w_x_2 = np.zeros(shape=[D, K, Q+1, Q+1])\n rnk = np.array([np.zeros(K) for i in range(D)])###初始rnk表 60000*10\n # print (rnk.shape)\n # while np.abs(oldloglikelyhood-newloglikelyhood)>0.0001: ###10类\n # while np.abs(oldloglikelyhood-newloglikelyhood)>500:\n for ite in range(10):\n # oldloglikelyhood=newloglikelyhood\n print ('迭代')\n\n ##-----------EEEE-step----------------##\n ##get responsibility of all data##\n for i in range(D):\n for k in range(K):\n # print (np.matmul(W[k],W[k].T).shape,psi.shape)\n cov=np.matmul(W[k],W[k].T)+np.diag(psi)\n\n # print (data[i].reshape(data[i].shape[0],1),miu[k].shape)\n mean=data[i].reshape(data[i].shape[0],1)-miu[k]\n # print(mean.shape)\n Gaussian=stats.norm.pdf(data[i],mean.reshape(-1),cov)\n # print(data[i])\n # print('得出的高斯函数值',Gaussian.pdf(data[i]))\n rnk[i][k]=pai[k]*np.mean(Gaussian)\n ##------------------------------------------##\n ##计算Ez和Ezz\n tem = psi + np.matmul(W[k], W[k].T)\n if np.linalg.det(tem) == 0:\n beta[k] = np.matmul(W[k].T, np.linalg.pinv(tem))\n # tem[0][0] = tem[0][0] + bias * 0.01\n else:\n tem = tem\n # print (np.matmul(W[k].T, np.linalg.inv(tem)))\n beta[k] = np.matmul(W[k].T, np.linalg.inv(tem))\n diff = data[i].reshape(data[i].shape[0],1) - miu[k]\n # diff = diff.reshape(diff.shape[0], 1)\n ##calculate E[z|w_k,x_i]\n Ez_w_x[i][k] = np.matmul(beta[k], (diff))\n data_i = data[i]\n # print ('qqqq', data_i.shape)\n data_i = data_i.reshape(data_i.shape[0], 1)\n line_one = np.ones(shape=(1, 1))\n ####Ez-------------------#####\n Ez_w_x_2[i][k] = np.vstack((Ez_w_x[i][k], line_one))\n Ezz_w_x[i][k] = (np.identity(Q) - np.matmul(beta[k], W[k]) + np.matmul(np.matmul(np.matmul(beta[k], diff), diff.T),beta[k].T))\n # print ('E2', Ezz_w_x.shape)\n ####------------Ezz--------------###\n Ezz_w_x_2[i][k] = np.column_stack((np.row_stack((Ezz_w_x[i][k], Ez_w_x[i][k].T)), Ez_w_x_2[i][k]))\n # print('得出',)\n #####------------单独计算W an miu\n W_and_miu_new[k]=np.column_stack((W[k],miu[k]))\n ##计算Q(log_likelihood)--------------------\n # print (rnk)\n sum = 0\n for i in range(D):\n for k in range(K):\n # print (W_and_miu_new[k].T, np.linalg.pinv(np.diag(psi)))\n xx = np.matmul(np.matmul(np.matmul(W_and_miu_new[k].T, np.linalg.pinv(np.diag(psi))),W_and_miu_new[k]), Ezz_w_x_2[i][k])\n p4 = 0.5 * rnk[i][k] * np.trace(xx)\n p2 = 0.5 * rnk[i][k] * 
np.matmul(np.matmul(data[i].T, np.linalg.pinv(np.diag(psi))),data[i])\n # print ('PPPP2',p2)\n p3 = 1 * rnk[i][k] * np.matmul(\n np.matmul(np.matmul(data[i].T, np.linalg.pinv(np.diag(psi))), W_and_miu_new[k]),Ez_w_x_2[i][k])\n p3 = p3\n sum = p2 - p3 + p4 + sum\n # print (psi)\n # print (np.log(abs(np.linalg.det(np.diag(psi)))))\n p1 = (D / 2) * np.log(abs(np.linalg.det(np.diag(psi))))\n # (2 * np.pi) ** (-D / 2)\n newloglikelyhood = const-p1 - sum\n print('NEWLOG', newloglikelyhood)\n ##现在在一次迭代中我们已经得到###\n ####----Q,Ezz_2,Ez_2,W_and_miu,rnk,psi的矩阵------###\n ##--------M-step----------------########\n for k in range(K):\n ##更新factor loading W and mean miu\n ##跟新pai 对i求和\n W_k_p1_sum = np.zeros(shape=[N,Q+1])\n Mu_k_p1_sum = np.zeros(shape=[Q +1,Q+1])\n pai_new_sum=0\n\n for i in range(D):\n W_k_p1_sum=rnk[i][k]*np.matmul(data[i].reshape(data[i].shape[0],1),Ez_w_x_2[i][k].T)+W_k_p1_sum\n Mu_k_p1_sum=rnk[i][k]*Ezz_w_x_2[i][k]+Mu_k_p1_sum\n ###pai的加和\n # print ('RNK',rnk[i][k])\n pai_new_sum=rnk[i][k]+pai_new_sum\n pai[k]=pai_new_sum/N #####更新PAI\n # print ('PPPAAAAAIII',pai)\n W_and_miu_new[k]=np.matmul(W_k_p1_sum,np.linalg.pinv(Mu_k_p1_sum))\n # print ('一个NEW',W_and_miu_new.shape)\n W[k,:,:]=W_and_miu_new[k,:,:W_and_miu_new[k].shape[1]-1]\n # print ('XIN WWW',W.shape)####更新WWWWW\n miu[k,:]=W_and_miu_new[k,:,-1].T.reshape(100,1) ####更新MIU!!\n ##更新协方差矩阵\n psi_new_p0=np.zeros(shape=[N,N])\n ##对i求和\n for i in range(D):\n ##对 k求和,\n data_i=data[i].reshape(data[i].shape[0],1)\n psi_new_p1=np.zeros(shape=[N,N])\n # print (psi_new_p1.shape)\n for k in range(K):\n pp1=np.matmul(W_and_miu_new[k],Ez_w_x_2[i][k])\n # print ('P111',p1.shape)\n psi_new_p1=rnk[i][k]*np.matmul((data_i-pp1),data_i.T)+psi_new_p1\n # print ('qqqqqqqqqq',psi_new_p1.shape)\n psi_new_p0=psi_new_p1+psi_new_p0\n # print (psi_new_p1.shape)\n ##最后的取对角线得新的协方差矩阵\n # print ('%%%%%%%',psi_new_p0.shape)\n #####见论文\n psi=np.diag(psi_new_p0)/D# 更新方差\n print ('PSI',psi.shape)\n # print ('PPPSSSII',Psi_New,np.trace(psi_new_p0))\n # rnk_=rnk/sumres\n # r.append(np.sum(rnk))##????????????\n # print('每一行数据的和', r)\n # # print('dasdas',len(r))\n # R.append(r)\n # print(np.array(R)[49])\n\n print('save_model')\n dic={'miu':miu,'pai':pai,'W':W,'psi':psi}\n # print ()\n # const=-N/2*log(np.linalg.det(psi))\n # part2=0\n # # part3=\n # for i in range(N):\n # for j in range(K):\n # part2=0.5*rnk*data[i].T*np.linalg.inv(psi)*data[i]+part2\n\n submodel = dic\n model.append(submodel)\n model=model\n # You can modify this to save other variables, etc \n # but make sure the name of the file is 'model.npz.\n np.savez_compressed('model.npz', model=model)", "def GenerateInitialSolution():\n c = random.random()*C\n count = 0\n while np.count_nonzero(alpha) < gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == 1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n while np.count_nonzero(alpha) < 2*gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == -1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n return alpha", "def prior_distribution(self):\n out = self.model.forward(self.inducing_points)\n return MultivariateNormal(out.mean, out.lazy_covariance_matrix.evaluate_kernel())", "def calc_prob_prior(iterations, lam):\n return list(map(lambda x: math.exp(-lam * x), range(iterations)))", "def forwardVariableGeneration(self):\n self.alpha = zeros((self.noOfEmmittingStates+2, self.T + 
1))\n\n # initialistation\n self.alpha[0,0] = 1.0\n self.alpha[1:,0] = 0.0\n self.alpha[0,1:] = 0.0\n\n # main recursion\n for t in range(1, self.T+1):\n for j in range(1, self.noOfEmmittingStates+1):\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k, t-1] * self.transitionMatrix[k, j-1])\n self.alpha[j, t] = self.b[j-1, t-1] * partialSum\n # since must end in final state, last alpha for states with zero transition\n # prob to last state must be zero?\n for row in range(self.transitionMatrix.shape[0]):\n if self.transitionMatrix[row,-1] == 0.0:\n self.alpha[row,-1] = 0.0\n # fwd prob variable for final state at 'last' timestep gets bumped into the\n # final column to save having a needless column\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k,-1] * self.transitionMatrix[k,-1])\n self.alpha[-1,-1] = partialSum\n\n # likelihood of observed sequence, p(O|lambda)\n self.observationLikelihood = self.alpha[-1,-1]", "def set_prior(self,which,what):\n\n which = self.grep_param_names(which)\n\n #check tied situation\n tie_partial_matches = [tie for tie in self.tied_indices if (not set(tie).isdisjoint(set(which))) & (not set(tie)==set(which))]\n if len(tie_partial_matches):\n raise ValueError, \"cannot place prior across partial ties\"\n tie_matches = [tie for tie in self.tied_indices if set(which)==set(tie) ]\n if len(tie_matches)>1:\n raise ValueError, \"cannot place prior across multiple ties\"\n elif len(tie_matches)==1:\n which = which[:1]# just place a prior object on the first parameter\n\n\n #check constraints are okay\n if isinstance(what, (priors.gamma, priors.log_Gaussian)):\n assert not np.any(which[:,None]==self.constrained_negative_indices), \"constraint and prior incompatible\"\n assert not np.any(which[:,None]==self.constrained_bounded_indices), \"constraint and prior incompatible\"\n unconst = np.setdiff1d(which, self.constrained_positive_indices)\n if len(unconst):\n print \"Warning: constraining parameters to be positive:\"\n print '\\n'.join([n for i,n in enumerate(self._get_param_names()) if i in unconst])\n print '\\n'\n self.constrain_positive(unconst)\n elif isinstance(what,priors.Gaussian):\n assert not np.any(which[:,None]==self.all_constrained_indices()), \"constraint and prior incompatible\"\n else:\n raise ValueError, \"prior not recognised\"\n\n\n #store the prior in a local list\n for w in which:\n self.priors[w] = what", "def pred(self, x):\r\n\r\n y = self.alpha * x + self.beta * self.__Us__(x) + self.gamma * self.__Vs__(x) + self.intercept\r\n return y", "def _call(self, x):\n if functional.prior is None:\n return (-1.0) / x + 1\n else:\n return (-functional.prior) / x + 1", "def __e__(self, ys, us):\n\n # Initial values\n init_y = ys[:, :, 0]\n init_mu = self.mus[:, :, 0]\n init_V = self.Vs[:, :, 0]\n\n self.ys = ys[:, :, 1:]\n self.us = us[:, :, 1:]\n\n # Note that we set the ys and us to be one less observation\n n = self.n_observations() + 1\n\n # These values will be with respect to actual observations t. 
Not filter_t, see comments below.\n C1_ts = np.zeros((self.observations_size, self.state_size, n))\n C2_ts = np.zeros((self.observations_size, self.observations_size, n))\n P_ts = np.zeros((self.state_size, self.state_size, n))\n P_t_tm1s = np.zeros((self.state_size, self.state_size, n))\n P_tm1s = np.zeros((self.state_size, self.state_size, n))\n G_ts = np.zeros((self.state_size, self.observations_size, n))\n\n (y_pred_online, ll_hist, V_smooth_tp1_ts) = self.smooth_filter(likelihood=True)\n\n # Note that these values are the smoothed mus from the backward algorithm.\n\n # Calculate initial Expectations.\n P_ts[:, :, 0] = init_V + init_mu @ init_mu.T\n\n # No P_tm1 or P_t_tm1 at t = 0\n\n # Calculate initial values needed for maximisation step\n C1_ts[:, :, 0] = init_y @ init_mu.T\n C2_ts[:, :, 0] = init_y @ init_y.T\n G_ts[:, :, 0] = init_mu @ init_y.T\n\n # Initial t index is out by 1 since we removed the first observation from filtering, to use as initial prior.\n # The data observed by the filter will thus be out by 1.\n # Let t represent the actual observation index and filter_t be the index observed by the filter. ie:\n # filter_t = 0..n-1 <=> t = 1..n\n for filter_t in range(n-1):\n t = filter_t + 1\n (y_t, u_t) = self.data(filter_t)\n (mu_t, V_t) = self.state(filter_t)\n C1_ts[:, :, t] = y_t @ mu_t.T\n C2_ts[:, :, t] = y_t @ y_t.T\n G_ts[:, :, t] = mu_t @ y_t.T\n P_ts[:, :, t] = V_t + mu_t @ mu_t.T\n\n (mu_tm1, V_tm1) = self.state(filter_t-1)\n P_t_tm1s[:, :, t] = V_smooth_tp1_ts[:, :, filter_t] + mu_t @ mu_tm1.T\n P_tm1s[:, :, t] = V_tm1 + mu_tm1 @ mu_tm1.T\n\n return C1_ts, C2_ts, P_ts, P_t_tm1s, P_tm1s, G_ts, ll_hist", "def prelu(input, weight):\n return FunctionLib.apply('PRelu', input.device, [input, weight])", "def trainModel( self, featureTrain, classTrain):", "def pretrain_forward(self, inp):\n return self.pre_fc(self.encoder(inp))", "def prior_sample_parameter(self, parameter):\n pass", "def bootstrap(model,state_dim,prior,ens_size,interval,nanl,tanl,obs,Q):\n \n # store the analysis times indices in the full integration interval\n a_time = np.array(range(0,len(interval),tanl))\n\n # storage dictionary for the trajectories and weights\n p_series = {}\n A = 'A_'\n\n # divergence safety check\n divergence = False\n \n # define the initial weights\n weights = (1.0/ens_size)*np.ones(ens_size) \n\n # loop through the analysis times starting at time zero\n for i in range(nanl):\n\n # store the prior weights and states\n\tprior_W = weights \n\tprior_S = np.reshape(prior,[ens_size,state_dim])\n\n # recompute the weights, and throw out neglible particles\n [analysis,weights,ens_size] = no_resample_update(weights,obs[i,:],Q,prior,ens_size,state_dim) \n\tpost_S = np.reshape(analysis,[ens_size,state_dim])\n\n # check for filter divergence\n if ens_size < 10:\n divergence = True\n A_i = A + str(i)\n p_series[A_i] = {'prior':prior_S,'prior_weight':prior_W,'post':post_S,'post_weight':weights}\n break\n \n # integrate the initial cloud to the next analysis time;\n # note integration interval starts at time 0, and slice notation goes to the last index - 1\n traj = odeint(model,analysis,interval[a_time[i]:a_time[i+1]+1])\n \n #create storage for next iteration\n A_i = A + str(i)\n p_series[A_i] = {'prior':prior_S,'prior_weight':prior_W,'post':post_S,'post_weight':weights,'traj':traj}\n \n #initialize the next forecast\n prior = traj[-1,:]\n \n # final analysis time weight update - no forward trajectory to store\n if not divergence:\n\tprior_W = weights\n\tprior_S = 
np.reshape(prior,[ens_size,state_dim])\n [analysis,weights,ens_size] = no_resample_update(weights,obs[i+1,:],Q,prior,ens_size,state_dim)\n\tpost_S = np.reshape(analysis,[ens_size,state_dim]) \n\tA_i = A + str(i+1)\n p_series[A_i] = {'prior':prior_S,'prior_weight':prior_W,'post':post_S,'post_weight':weights}\n \n return p_series", "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n best_bic_score = float('inf')\n best_model = self.base_model(self.n_constant)\n\n # num_states: for n between self.min_n_components and self.max_n_components\n for num_states in range(self.min_n_components, self.max_n_components + 1):\n model = self.base_model(num_states)\n\n # logL: log(the likelihood of the fitted model)\n try:\n logL = model.score(self.X, self.lengths)\n except Exception as e:\n continue\n\n # N: the number of data points (= sample size)\n N = sum(self.lengths)\n\n # p: the number of free parameters\n # http://hmmlearn.readthedocs.io/en/latest/api.html\n # Attributes of GaussianHMM\n # transmat_: (array, shape (n_components, n_components)) Matrix of transition probabilities between states.\n # since they add up to 1.0, the last row can be calculated from others,\n # so it is n_components * (n_components - 1).\n # startprob_: (array, shape (n_components, )) Initial state occupation distribution.\n # since they add up to 1.0, it is (n_components - 1).\n # means_: (array, shape (n_components, n_features)) Mean parameters for each state.\n # covars_: (array) Covariance parameters for each state. (n_components, n_features) if “diag”\n # p = #transmat_ + #startprob_ + #means_ + #covars_\n # = n_components * (n_components - 1) + n_components - 1 + n_components * n_features + n_components * n_features\n p = num_states ** 2 + 2 * num_states * model.n_features - 1\n\n # BIC = -2 * logL + p * logN\n bic_score = -2 * logL + p * np.log(N)\n\n if bic_score < best_bic_score:\n best_bic_score, best_model = bic_score, model\n \n return best_model", "def _init_predictor(self):\n self.conv_cls_prev = self._init_branch(\n conv_channels=self.cls_branch,\n conv_strides=(1, ) * len(self.cls_branch))\n self.conv_cls = nn.Conv2d(self.cls_branch[-1], self.cls_out_channels,\n 1)\n # init regression head\n self.conv_reg_prevs = nn.ModuleList()\n # init output head\n self.conv_regs = nn.ModuleList()\n # group_reg_dims:\n # ((4, ), (2, ), (20, ), (3, ), (3, ), (8, 8), (1, ), (1, ))\n for i in range(len(self.group_reg_dims)):\n reg_dims = self.group_reg_dims[i]\n reg_branch_channels = self.reg_branch[i]\n out_channel = self.out_channels[i]\n reg_list = nn.ModuleList()\n if len(reg_branch_channels) > 0:\n self.conv_reg_prevs.append(\n self._init_branch(\n conv_channels=reg_branch_channels,\n conv_strides=(1, ) * len(reg_branch_channels)))\n for reg_dim in reg_dims:\n reg_list.append(nn.Conv2d(out_channel, reg_dim, 1))\n self.conv_regs.append(reg_list)\n else:\n self.conv_reg_prevs.append(None)\n for reg_dim in reg_dims:\n reg_list.append(nn.Conv2d(self.feat_channels, reg_dim, 1))\n self.conv_regs.append(reg_list)", "def first_class_tp(nvar=18, prowQ=9, mcon=4 ):\n n = nvar + prowQ + mcon\n p = prowQ + mcon\n m = mcon\n c = np.zeros(n)\n d = np.zeros(p)\n ucon = np.ones(m)*inf\n uvar = np.ones(n)*inf\n lvar = -np.ones(n)*inf\n #name = str(p)+'_'+str(n)+'_'+str(m)+'_First'+'.txt'\n name = str(p)+'_'+str(n)+'_'+str(m)+'_l1_tp'+'.txt'\n \n # Q randomly chosen such that Qij belong to the (-10,10)\n Q = 10 * np.random.rand(p, n)*(np.random.randint(3, size=(p,n))-1)\n \n # d=(di), di=sum dij 
for i= 1,...,p\n for i in range(p): \n d[i]= Q[i,:].sum()\n # B randomly chosen such that Bij belong to the (-3,3)\n B = 3 * np.random.rand(m, n)*(np.random.randint(3, size=(m,n))-1)\n \n b= np.zeros(m)\n b[0] = B[0,:].sum()\n for i in range(m):\n mu = np.random.rand()+ 1e-10\n b[i] = B[i,:].sum()-m*mu\n lcon = b\n return Q,B,d,c,lcon,ucon,lvar,uvar,name", "def Fx_case_B_Chris(z, x, gamma):\n \n if z == 0 and x == 0:\n return 0\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n alp = alpha(z, x, beta2)\n sin2a = sin(2*alp)\n cos2a = cos(2*alp) \n\n kap = 2*(alp - z)/beta\n #kap = sqrt(x**2 + 4*(1+x)*sin(alp)**2) # kappa for case B\n \n N1 = sin2a - beta*kap\n N2 = (1+x)*sin2a - beta*kap\n D = kap - beta*(1+x)*sin2a\n \n #out = (1+x)*(N1*N2 )/D**3\n\n ### SC term with prefactor 1/(gamma*beta)^2 = 1/(gamma^2-1)\n NSC = (1 + beta2 - beta*kap*sin2a + x - cos2a*(1 + beta2*(1 + x)) ) / (gamma**2-1) \n out = (1+x)*(N1*N2 + NSC)/D**3\n return out" ]
[ "0.6737038", "0.636095", "0.607639", "0.60483646", "0.60367125", "0.60130304", "0.59627426", "0.59598047", "0.59528166", "0.5932962", "0.5882837", "0.58736926", "0.5853717", "0.5850431", "0.5840175", "0.5834046", "0.57900655", "0.57882077", "0.5787185", "0.57477564", "0.57408", "0.57374907", "0.57234085", "0.57124555", "0.57115716", "0.56635296", "0.56245697", "0.56222695", "0.55915195", "0.5585057", "0.55828905", "0.5552311", "0.5548124", "0.55430526", "0.5540045", "0.5537883", "0.5536559", "0.55320907", "0.5522009", "0.55099344", "0.54941833", "0.54941064", "0.5477466", "0.54647714", "0.5458239", "0.54537344", "0.5452985", "0.5452454", "0.5421134", "0.54080266", "0.54004526", "0.5399659", "0.53978413", "0.53966135", "0.5388528", "0.5385467", "0.5380184", "0.5378674", "0.5376641", "0.53732455", "0.5365286", "0.5358806", "0.5340024", "0.532967", "0.53229105", "0.5321105", "0.5318943", "0.53098994", "0.53083634", "0.5304265", "0.5293888", "0.529076", "0.5289496", "0.52878463", "0.528585", "0.52821743", "0.52798223", "0.52798223", "0.52798223", "0.52798223", "0.52785885", "0.52746135", "0.5271404", "0.5265434", "0.5264972", "0.5263048", "0.5258506", "0.5258053", "0.5257631", "0.52541935", "0.52514935", "0.5246714", "0.5245782", "0.523655", "0.52355075", "0.5226541", "0.522152", "0.52134824", "0.521043", "0.5206406", "0.5203604" ]
0.0
-1
r"""Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
def bmshj2018_hyperprior( quality, metric="mse", pretrained=False, progress=True, **kwargs ): if metric not in ("mse", "ms-ssim"): raise ValueError(f'Invalid metric "{metric}"') if quality < 1 or quality > 8: raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)') return _load_model( "bmshj2018-hyperprior", metric, quality, pretrained, progress, **kwargs )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale(self):", "def bias_prior(self):", "def set_hyper_parameters(self, x):\n self.set_scale(x[0])", "def setPrior(self,xPrior,priorWeight):\n assert self.regularizationLambda == 0\n if not isinstance(xPrior,np.ndarray):\n xPrior = np.array(xPrior)\n self.count = 1\n self.sumWeight = priorWeight\n self.scale = 1\n self.AtA = np.eye(self.n)*priorWeight\n self.AtAinv = np.eye(self.n)/priorWeight\n self.Atb = xPrior*priorWeight\n self.btb = np.dot(xPrior,xPrior)*priorWeight\n self.degenerate = False\n self.x = xPrior", "def get_scale_net():\n return nn.Sequential(nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), nn.LeakyReLU(), nn.Linear(64, 2), nn.Tanh())", "def supervised_cost_scale(\n scale, loss_supervised, output_noise_labelled, labelled_target\n):\n cost_supervised = loss_supervised.forward(output_noise_labelled, labelled_target)\n\n cost_supervised *= scale\n return cost_supervised", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale(self):\n return self._gev_bijector.scale", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def rescale_prior(predictionmatrix, priorprobs):\n\tpriorprobs = np.trim_zeros(priorprobs, 'f')\n\tN, C = np.shape(predictionmatrix)\n\tassert C == np.size(priorprobs)\n\tpriorprobs = priorprobs / np.sum(priorprobs).astype(float)\n\taverageprediction = np.mean(predictionmatrix, axis = 0)\n\tfactor = priorprobs / averageprediction\n\tresult = predictionmatrix * np.repeat(factor[None,:],N,0)\n\treturn normalize_probabilities(result)", "def _likelihood_der1_scale(self, hyperparam):\n\n # Get eta\n eta = self._hyperparam_to_eta(hyperparam)\n\n # Set scale of the covariance object\n scale = self._hyperparam_to_scale(hyperparam[self.scale_index:])\n self.mixed_cor.set_scale(scale)\n\n # Initialize jacobian\n dell_dscale = numpy.zeros((scale.size, ), dtype=float)\n\n # Update Y, C, Mz\n self._update_Y_C_Mz(hyperparam)\n\n # Find optimal sigma2\n sigma2 = self._find_optimal_sigma2(hyperparam)\n\n # Compute (or update) Kninv and KnpKninv\n if not self.stochastic_traceinv:\n self._update_Kninv_KnpKninv(hyperparam)\n\n # Knp is the derivative of mixed_cor (Kn) w.r.t p-th element of scale.\n for p in range(scale.size):\n\n if self.stochastic_traceinv:\n # Compute traceinv using stochastic estimation method. Note\n # that since Knp is not positive-definite, we cannot use\n # Cholesky method in imate. 
The only viable option is\n # Hutchinson's method.\n Knp = self.mixed_cor.get_matrix(eta, derivative=[p])\n trace_KnpKninv = self.mixed_cor.traceinv(\n eta, B=Knp, imate_options={'method': 'hutchinson'})\n else:\n trace_KnpKninv = imate.trace(self.KnpKninv[p], method='exact')\n\n # Compute the second component of trace of Knp * M\n KnpY = self.mixed_cor.dot(self.Y, eta=eta, derivative=[p])\n YtKnpY = numpy.matmul(self.Y.T, KnpY)\n CYtKnpY = numpy.matmul(self.C, YtKnpY)\n trace_CYtKnpY = numpy.trace(CYtKnpY)\n\n # Compute trace of Knp * M\n trace_KnpM = trace_KnpKninv - trace_CYtKnpY\n\n # Compute zMKnpMz\n KnpMz = self.mixed_cor.dot(self.Mz, eta=eta, derivative=[p])\n zMKnpMz = numpy.dot(self.Mz, KnpMz)\n\n # Derivative of ell w.r.t p-th element of distance scale\n dell_dscale[p] = -0.5*trace_KnpM + 0.5*zMKnpMz / sigma2\n\n return dell_dscale", "def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return", "def prior(kernel_size, bias_size): #removed dtype=None, unused argument\n number = kernel_size + bias_size\n prior_model = keras.Sequential(\n [\n tfp.layers.DistributionLambda(\n lambda t: tfp.distributions.MultivariateNormalDiag(\n loc=tf.zeros(number), scale_diag=tf.ones(number)\n )\n )\n ]\n )\n return prior_model", "def scale_model(model, scale):\n params = model.named_parameters()\n dict_params = dict(params)\n with torch.no_grad():\n for name, param in dict_params.items():\n dict_params[name].set_(dict_params[name].data * scale)", "def scale(self,bvp):\n\n sol = bvp.solution\n # Additional aux entries for initial and terminal BCs\n extras = [{'type':'initial','vars':self.problem_data['state_list']},\n {'type':'terminal','vars':self.problem_data['state_list']}]\n\n # Scale the states and costates\n for idx,state in enumerate(self.problem_data['state_list']):\n sol.y[idx,:] /= self.scale_vals['states'][state]\n\n # Scale auxiliary variables\n for aux in (self.problem_data['aux_list']+extras):\n if aux['type'] not in Scaling.excluded_aux:\n for var in aux['vars']:\n sol.aux[aux['type']][var] /= self.scale_vals[aux['type']][var]\n\n # Scale parameters\n for idx, param in enumerate(self.problem_data['parameter_list']):\n sol.parameters[idx] /= self.scale_vals['parameters'][param]", "def scaleTo(h,ht):\n total_h=h.Integral(1,h.GetNbinsX())\n if type(ht) is float:\n h.Scale(ht/total_h)\n else:\n total_ht=ht.Integral(1,ht.GetNbinsX())\n h.Scale(total_ht/total_h)", "def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:\n return sample", "def scale(self):\n return self.distribution.scale", "def setupMixedPrior(self):\n\n if self.namePrior.find('mixed') < 0:\n return\n\n # we set up the default parameters for bounded flat prior,\n # then update them with non-flat examples\n if np.size(self.hyper) < 7:\n self.setupDefaultPars()\n\n # Adjust the hyperparameters for defaults.\n self.hyper[0][2] = 0.45\n self.hyper[1][2] = 0.05\n self.hyper[0][3] = 16.3\n self.hyper[1][3] = 0.1\n\n nMeths = np.shape(self.hyper)[-1]\n self.mixedNames = ['binaryBoundedOne' for i in 
range(nMeths)]\n\n ### Let's try some gaussians. Eccentricity and period\n self.mixedNames[2] = 'gaussianOne'\n self.mixedNames[3] = 'gaussianOne'\n\n self.findMixedMethods()", "def fit_line(model,scaling):\n\treturn scaling*model", "def init_weights(self, t, prior, scaled_prior = False):\n from scipy.cluster.vq import kmeans2\n from scipy.spatial.distance import cdist\n \n # check if t has the right shape\n if not t.ndim == 2:\n t = t[:, None]\n \n if scaled_prior:\n #self.w1 = np.random.normal(loc=0.0, scale = 1,size=[H, d+1])/np.sqrt(d+1) # 1st layer weights + bias\n #self.w2 = np.random.normal(loc=0.0, scale = 1,size=[ny, H+1])/np.sqrt(H+1) # 2nd layer weights + bias\n sigma1 = 1.0/np.sqrt(self.d+1)\n sigma2 = 1.0/np.sqrt(self.H+1)\n else:\n # init weights from gaussian with width given by prior\n sigma1 = 1.0/np.sqrt(prior)\n sigma2 = sigma1\n \n self.w1 = np.random.normal(loc=0.0, scale = 1,size=[self.H, self.d+1]) * sigma1 # 1st layer weights + bias\n self.w2 = np.random.normal(loc=0.0, scale = 1,size=[self.ny, self.H+1]) * sigma2 # 2nd layer weights + bias\n \n # init biases (taken from netlab, gmminit.m)\n [centroid, label] = kmeans2(t, self.M)\n cluster_sizes = np.maximum(np.bincount(label), 1) # avoid empty clusters\n alpha = cluster_sizes/np.sum(cluster_sizes)\n if (self.M > 1):\n # estimate variance from the distance to the nearest centre\n sigma = cdist(centroid, centroid)\n sigma = np.min(sigma + np.diag(np.diag(np.ones(sigma.shape))) * 1000, 1)\n sigma = np.maximum(sigma, np.finfo(float).eps) # avoid underflow\n else:\n # only one centre: take average variance\n sigma = np.mean(np.diag([np.var(t)]))\n # set biases, taken from netlab, mdninit.m\n self.w2[0:self.M,0] = alpha\n self.w2[self.M:2*self.M,0] = np.log(sigma)\n self.w2[2*self.M:,0] = np.reshape(centroid, [self.M * self.c])", "def prescaler(self) -> int:", "def calc_scale(alpha, targets, preds, gamma):\n return alpha * tf.pow(tf.abs(targets - tf.nn.sigmoid(preds)), gamma)", "def buildWPriorTerm(self):\r\n\r\n # self.w_prior.shape == (minibatch size,)\r\n self.w_prior = 0.5*T.sum(1 + T.log(self.qwgy_var) - self.qwgy_mu**2-self.qwgy_var, axis=1)\r\n\r\n self.w_prior_modif = - T.maximum(self.hyper['treshold_w_prior'], -self.w_prior)", "def scale(x, feature_range=(-1,1)):\r\n x = x * 2 - 1\r\n return x", "def scale(self, scale):\n\t\tself._current_score *= scale", "def scaleProcess(process,scale):\n #print '>>> scaleProcess(\"%s\",%.3f):'%(process.process(),scale)\n #print \">>> rate before = %s\"%(process.rate())\n process.set_rate(process.rate()*scale)\n #print \">>> rate after = %s\"%(process.rate())", "def model_method(data, **prior_params):\n weight_m = np.vstack((data.weight_std, data.weight_std**2, data.weight_std**3))\n with pm.Model() as model:\n alpha = pm.Normal('alpha', mu=prior_params['alpha mu'], sd=prior_params['alpha sd'])\n beta = pm.Normal('beta', mu=prior_params['beta mu'], sd=prior_params['beta sd'], shape=3)\n sigma = pm.Uniform('sigma', lower=prior_params['sigma lower'], upper=prior_params['sigma upper'])\n mu = pm.Deterministic('mu', alpha + pm.math.dot(beta, weight_m))\n height = pm.Normal('height', mu=mu, sd=sigma, observed=data.height)\n return model", "def mult_var_by_prior(self, x_scaled):\n model_var = self.likelihood(x_scaled)[1]\n tensor_log_prior = self.log_prior(x_scaled)\n return tf.reshape(model_var, shape=tensor_log_prior.shape) * tf.math.exp(tensor_log_prior)", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]", "def __call__(self, x):\n return self._pre_scale * tf.matmul(x, 
self._weight) + self._bias", "def scale(curve):\n return curve/rmsd(curve)", "def __init__(self,scale):\n self.scale = scale", "def scale(input):\n return (input - np.min(input)) / ((np.max(input) - np.min(input)))", "def scale_curve(beta):\n n, T = beta.shape\n normbetadot = zeros(T)\n betadot = gradient(beta, 1. / T)\n betadot = betadot[1]\n for i in range(0, T):\n normbetadot[i] = norm(betadot[:, i])\n\n scale = trapz(normbetadot, linspace(0, 1, T))\n beta_scaled = beta / scale\n\n return (beta_scaled, scale)", "def forward(self, x, scale_param):\n reg_arr = torch.exp(scale_param * self.reg_conv(self.reg_branch(x)))\n label_branch_arr = self.label_branch(x)\n label_arr = self.label_conv(label_branch_arr)\n center_arr = self.center_conv(label_branch_arr)\n return (reg_arr, label_arr, center_arr)", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]\n # pass", "def scale_model(model,scaleparname='A',scaleval=1):\n model = get_model_instance(model)\n if scaleparname in model.params:\n scaleparname += '1'\n if isinstance(model,FunctionModel1D):\n compclass = CompositeModel1D\n else:\n compclass = CompositeModel\n res = compclass((model,'constant'),operation='*',\n parnames={'C1':scaleparname})\n setattr(res,scaleparname,scaleval)\n return res", "def GetScale(self):\n ...", "def adapt_length_scale(self):\n Ne = max(1,self.Ne)\n Nc = max(1,self.Nc)\n ratio = Ne/(Ne+Nc)\n self.mu *= 2*ratio", "def _likelihood_der2_scale(self, hyperparam):\n\n # Get eta\n eta = self._hyperparam_to_eta(hyperparam)\n\n # Set scale of the covariance object\n scale = self._hyperparam_to_scale(hyperparam[self.scale_index:])\n self.mixed_cor.set_scale(scale)\n\n # Initialize Hessian\n d2ell_dscale2 = numpy.zeros((scale.size, scale.size), dtype=float)\n\n # Update Y, C, Mz\n self._update_Y_C_Mz(hyperparam)\n\n # Find optimal sigma2\n sigma2 = self._find_optimal_sigma2(hyperparam)\n\n # Compute (or update) Kninv and KnpKninv\n if not self.stochastic_traceinv:\n self._update_Kninv_KnpKninv(hyperparam)\n\n # Knp is the derivative of mixed_cor (Kn) w.r.t p-th element of scale.\n for p in range(scale.size):\n\n KnpMz = self.mixed_cor.dot(self.Mz, eta=eta, derivative=[p])\n MKnpMz = self.M_dot(self.C, self.Y, eta, KnpMz)\n\n for q in range(scale.size):\n\n # 1. Compute zMKnqMKnpMz\n if p == q:\n KnqMz = KnpMz\n else:\n KnqMz = self.mixed_cor.dot(self.Mz, eta=eta,\n derivative=[q])\n zMKnqMKnpMz = numpy.dot(KnqMz, MKnpMz)\n\n # 2. Compute zMKnpqMz\n KnpqMz = self.mixed_cor.dot(self.Mz, eta=eta,\n derivative=[p, q])\n zMKnpqMz = numpy.dot(self.Mz, KnpqMz)\n\n # 3. Computing trace of Knpq * M in three steps\n\n # Compute the first component of trace of Knpq * Kninv\n Knpq = self.mixed_cor.get_matrix(eta, derivative=[p, q])\n if self.stochastic_traceinv:\n trace_KnpqKninv = self.mixed_cor.traceinv(\n eta, B=Knpq,\n imate_options={'method': 'hutchinson'})\n else:\n KnpqKninv = Knpq @ self.Kninv\n trace_KnpqKninv = imate.trace(KnpqKninv, method='exact')\n\n # Compute the second component of trace of Knpq * M\n KnpqY = self.mixed_cor.dot(self.Y, eta=eta, derivative=[p, q])\n YtKnpqY = numpy.matmul(self.Y.T, KnpqY)\n CYtKnpqY = numpy.matmul(self.C, YtKnpqY)\n trace_CYtKnpqY = numpy.trace(CYtKnpqY)\n\n # Compute trace of Knpq * M\n trace_KnpqM = trace_KnpqKninv - trace_CYtKnpqY\n\n # 4. 
Compute trace of Knp * M * Knq * M\n\n # Compute first part of trace of Knp * M * Knq * M\n Knp = self.mixed_cor.get_matrix(eta, derivative=[p])\n Knq = self.mixed_cor.get_matrix(eta, derivative=[q])\n if self.stochastic_traceinv:\n trace_KnpMKnqM_1 = self.mixed_cor.traceinv(\n eta, B=Knq, C=Knp,\n imate_options={'method': 'hutchinson'})\n else:\n KnpKninvKnqKninv = numpy.matmul(self.KnpKninv[p],\n self.KnpKninv[q])\n trace_KnpMKnqM_1 = imate.trace(KnpKninvKnqKninv,\n method='exact')\n\n # Compute the second part of trace of Knp * M * Knq * M\n KnpY = Knp @ self.Y\n if p == q:\n KnqY = KnpY\n else:\n KnqY = Knq @ self.Y\n KninvKnqY = self.mixed_cor.solve(KnqY, eta=eta)\n YtKnpKninvKnqY = numpy.matmul(KnpY.T, KninvKnqY)\n F21 = numpy.matmul(self.C, YtKnpKninvKnqY)\n F22 = numpy.matmul(self.C, YtKnpKninvKnqY.T)\n trace_KnpMKnqM_21 = numpy.trace(F21)\n trace_KnpMKnqM_22 = numpy.trace(F22)\n\n # Compute the third part of trace of Knp * M * Knq * M\n YtKnpY = numpy.matmul(self.Y.T, KnpY)\n if p == q:\n YtKnqY = YtKnpY\n else:\n YtKnqY = numpy.matmul(self.Y.T, KnqY)\n Dp = numpy.matmul(self.C, YtKnpY)\n if p == q:\n Dq = Dp\n else:\n Dq = numpy.matmul(self.C, YtKnqY)\n D = numpy.matmul(Dp, Dq)\n trace_KnpMKnqM_3 = numpy.trace(D)\n\n # Compute trace of Knp * M * Knq * M\n trace_KnpMKnqM = trace_KnpMKnqM_1 - trace_KnpMKnqM_21 - \\\n trace_KnpMKnqM_22 + trace_KnpMKnqM_3\n\n # 5. Second \"local\" derivatives w.r.t scale\n local_d2ell_dscale2 = -0.5*trace_KnpqM + 0.5*trace_KnpMKnqM + \\\n (0.5*zMKnpqMz - zMKnqMKnpMz) / sigma2\n\n # Computing total second derivative\n dp_log_sigma2 = -numpy.dot(self.Mz, KnpMz) / \\\n (self.rdof*sigma2)\n if p == q:\n dq_log_sigma2 = dp_log_sigma2\n else:\n dq_log_sigma2 = -numpy.dot(self.Mz, KnqMz) / \\\n (self.rdof*sigma2)\n d2ell_dscale2[p, q] = local_d2ell_dscale2 + \\\n 0.5 * self.rdof * dp_log_sigma2 * dq_log_sigma2\n\n if p != q:\n d2ell_dscale2[q, p] = d2ell_dscale2[p, q]\n\n return d2ell_dscale2", "def __init__(self,model,alpha=0,head_min=0,head_max=1,k=1,\r\n variables=[],priors=[]):\r\n \r\n import numpy as np\r\n \r\n # Append the base to the elementlist\r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n # Set orientation value\r\n self.alpha = alpha\r\n \r\n # Set potential scaling variables\r\n self.head_min = head_min\r\n self.head_max = head_max\r\n \r\n # Assign the hydraulic conductivity of the base model\r\n self.k = k\r\n \r\n # The model requires the base flow in terms of hydraulic potential (phi)\r\n # The function head_to_potential extracts the following variables:\r\n # phi_min hydraulic potential corresponding to head_min\r\n # phi_max hydraulic potential corresponding to head_max\r\n self.head_to_potential()\r\n \r\n # Check input for validity\r\n self.check_input()\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.variables += [var]\r\n self.model.priors += [self.priors[idx]]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def scale(self, up):\n s = 1.1 if up else 0.9\n self.scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([s, s, s])\n )\n\n self.aabb.scale(s)", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def scale(self, data: 
np.ndarray):\n if self.scale_type == \"min_max\":\n scaled_data = (data - self.predictor_min) / (\n self.predictor_max - self.predictor_mean\n )\n elif self.scale_type == \"normalize\":\n scaled_data = (data - self.predictor_mean) / (\n self.predictor_max - self.predictor_min\n )\n elif self.scale_type == \"standardize\":\n scaled_data = (data - self.predictor_mean) / self.predictor_std\n elif self.scale_type == \"scale\":\n scaled_data = data - self.predictor_mean\n else:\n scaled_data = data\n return scaled_data", "def rescale(tx):\n mins = np.amin(tx, axis=0)\n maxs = np.amax(tx, axis=0)\n txscale = (tx - mins) / (maxs - mins)\n return txscale", "def scale(self, alpha):\n\t\tc = SparseVector(self.d)\n\t\tfor i in self.st.keys():\n\t\t\tc.put(i, alpha*self.get(i))\n\t\treturn c", "def backprop(self, loss: torch.FloatTensor):\n if self.scaler is not None:\n self.scaler.scale(loss).backward()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()", "def scale(x, p=2, inplace=False):\n return x / np.linalg.norm(x, ord=p)", "def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:\n return sample", "def normalize_to_prob(inp):\n return (inp + 1)/2", "def scale_invert(self):", "def b_scale_object():\n \n bpy.ops.transform.resize(value=(7.5,1,1), constraint_axis=(True,False,False))\n bpy.ops.transform.resize(value=(1,7.5,1), constraint_axis=(False,True,False))\n bpy.ops.transform.resize(value=(1,1,3.5), constraint_axis=(False,False,True))\n bpy.ops.object.transform_apply(scale=True)", "def __init__(self, input_dim=600+9, output_dim=1*3, dropout_prob=0., scale=3):\n super(F0_RNN_Scaled, self).__init__(input_dim=input_dim, output_dim=output_dim, dropout_prob=dropout_prob)\n self.scale = scale", "def rescale(self):\n # Get the L1 norm of data and scale correction for each fiber\n data_dims = self.data_dims\n if data_dims is ():\n tens_scale = self.data.abs()\n else:\n tens_scale = torch.sum(self.data.abs(), dim=data_dims, keepdim=True)\n log_shift = torch.floor(TARGET_SCALE(self.shape, data_dims) - \n torch.log2(tens_scale))\n\n # Keep the scale for zero fibers unchanged\n if torch.any(torch.isinf(log_shift)):\n log_shift = torch.where(torch.isfinite(log_shift), log_shift,\n torch.zeros_like(log_shift))\n\n return STensor(self.data*(2**log_shift), \n self.scale-log_shift)", "def normalize_1d(x, scale=(0, 1, 1024)):\n new_min = scale[0]\n new_max = scale[1]\n new_len = scale[2]\n (min_x, max_x, old_size) = scale_1d(x)\n x_norm = (x - min_x) / (max_x - min_x)\n old_baseline = np.linspace(0, 1, old_size)\n new_baseline = np.linspace(0, 1, new_len)\n if len(old_baseline) <= 1:\n old_baseline = np.array([0, 1])\n x_norm = np.array([1, 0])\n x_interp = interp.interp1d(old_baseline, x_norm)\n x_resized = (x_interp(new_baseline) * (new_max - new_min)) + new_min\n return x_resized", "def standardize(layer, offset, scale, shared_axes='auto'):\n # Subtract the offset\n layer = BiasLayer(layer, -offset, shared_axes)\n # Do not optimize the offset parameter\n layer.params[layer.b].remove('trainable')\n # Divide by the scale\n layer = ScaleLayer(layer, floatX(1.)/scale, shared_axes)\n # Do not optimize the scales parameter\n layer.params[layer.scales].remove('trainable')\n return layer", "def test_data_is_scaled():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"SGD\")\n assert sum(atom.sgd.predict(X_bin)) > 0 # Always 0 if not scaled", "def forward(self, x, 
alpha=1e-8):\r\n y = x.pow(2.).mean(dim=1, keepdim=True).add(alpha).sqrt() # [N1HW]\r\n y = x / y # normalize the input x volume\r\n return y", "def _update_model(self, new_model):\n super()._update_model(new_model)\n\n if 'e' in self.tr_params:\n if self.state_no_train_de is None:\n for i in range(self.n_emissions - self.nr_no_train_de):\n self.B[i] = (1 - self.learning_rate) * new_model['B'][\n i\n ] + self.learning_rate * self.B[i]\n else:\n for i in range(self.n_d_emissions):\n if i < self.n_d_emissions - self.nr_no_train_de:\n self.B[i] = (1 - self.learning_rate) * new_model['B'][\n i\n ] + self.learning_rate * self.B[i]\n else:\n self.B[i][: -self.state_no_train_de, :] = (\n (1 - self.learning_rate)\n * new_model['B'][i][: -self.state_no_train_de, :]\n + self.learning_rate *\n self.B[i][: -self.state_no_train_de, :]\n )\n\n for i in range(self.n_emissions):\n normalise(new_model['B'][i], axis=1)", "def estimatePrior(bandwidth, shape):\n prior = 1.0\n\n for b in bandwidth.flatten():\n prior *= 1.0 + shape * b\n\n return 1.0 / prior", "def reScaleLandsat(self,img):\n \n\t\tthermalBand = ee.List(['thermal'])\n\t\tthermal = ee.Image(img).select(thermalBand).multiply(10)\n \n\t\totherBands = ee.Image(img).bandNames().removeAll(thermalBand)\n\t\tscaled = ee.Image(img).select(otherBands).divide(0.0001)\n \n\t\timage = ee.Image(scaled.addBands(thermal)).int16()\n \n\t\treturn image.copyProperties(img)", "def _hyperparam_to_scale(self, hyperparam):\n\n # If logscale is used, input hyperparam is log of the scale.\n if self.use_log_scale:\n scale = 10.0**hyperparam\n else:\n scale = numpy.abs(hyperparam)\n\n return scale", "def scaleLandsat(self,img):\n\t\tthermal = img.select(ee.List(['thermal'])).multiply(0.1)\n\t\tscaled = ee.Image(img).select(self.env.divideBands).multiply(ee.Number(0.0001))\n\t\t\n\t\treturn img.select([]).addBands(scaled).addBands(thermal)", "def normalize_0d(x, old_scale=(0, 1, 1024), new_scale=(0, 1, 1024)):\n old_delta = old_scale[1] - old_scale[0]\n new_delta = new_scale[1] - new_scale[0]\n old_min = old_scale[0]\n new_min = new_scale[0]\n return (x - old_min) * (new_delta / old_delta) + new_min", "def _eta_sfr_scaling(self,x,q):\n i = self.enum[q]\n A = self.scaling_params['A'][i]\n b = self.scaling_params['b'][i]\n return A*x**b", "def bootstrap(model,state_dim,prior,ens_size,interval,nanl,tanl,obs,Q):\n \n # store the analysis times indices in the full integration interval\n a_time = np.array(range(0,len(interval),tanl))\n\n # storage dictionary for the trajectories and weights\n p_series = {}\n A = 'A_'\n\n # divergence safety check\n divergence = False\n \n # define the initial weights\n weights = (1.0/ens_size)*np.ones(ens_size) \n\n # loop through the analysis times starting at time zero\n for i in range(nanl):\n\n # store the prior weights and states\n\tprior_W = weights \n\tprior_S = np.reshape(prior,[ens_size,state_dim])\n\n # recompute the weights, and throw out neglible particles\n [analysis,weights,ens_size] = no_resample_update(weights,obs[i,:],Q,prior,ens_size,state_dim) \n\tpost_S = np.reshape(analysis,[ens_size,state_dim])\n\n # check for filter divergence\n if ens_size < 10:\n divergence = True\n A_i = A + str(i)\n p_series[A_i] = {'prior':prior_S,'prior_weight':prior_W,'post':post_S,'post_weight':weights}\n break\n \n # integrate the initial cloud to the next analysis time;\n # note integration interval starts at time 0, and slice notation goes to the last index - 1\n traj = odeint(model,analysis,interval[a_time[i]:a_time[i+1]+1])\n \n #create 
storage for next iteration\n A_i = A + str(i)\n p_series[A_i] = {'prior':prior_S,'prior_weight':prior_W,'post':post_S,'post_weight':weights,'traj':traj}\n \n #initialize the next forecast\n prior = traj[-1,:]\n \n # final analysis time weight update - no forward trajectory to store\n if not divergence:\n\tprior_W = weights\n\tprior_S = np.reshape(prior,[ens_size,state_dim])\n [analysis,weights,ens_size] = no_resample_update(weights,obs[i+1,:],Q,prior,ens_size,state_dim)\n\tpost_S = np.reshape(analysis,[ens_size,state_dim]) \n\tA_i = A + str(i+1)\n p_series[A_i] = {'prior':prior_S,'prior_weight':prior_W,'post':post_S,'post_weight':weights}\n \n return p_series", "def prior_predictive(self):\n cfg = self.config\n n = cfg['batch_size'] * cfg['q/n_samples']\n n_samples = cfg['q/n_samples']\n with util.get_or_create_scope('model', reuse=True):\n h_prior = tf.cast(self.p_h_L.sample(n), cfg['dtype'])\n h_prior = tf.reshape(\n h_prior, [cfg['q/n_samples'], cfg['batch_size'], -1])\n h = [None] * cfg['p/n_layers']\n h[cfg['p/n_layers'] - 1] = h_prior\n for n in range(cfg['p/n_layers'] - 1, 0, -1):\n p_h_n = self.build_stochastic_layer(n, h_above=h[n])\n h[n - 1] = tf.cast(p_h_n.sample(), cfg['dtype'])\n return self.likelihood(h[0])", "def get_prior(self):\n assert self._prior in self._priors, 'Unsupported prior! Check the _priors attribute for a list of priors.'\n if self._prior == 'Gaussian':\n prior = 0.5 * torch.sum(self.parameters ** 2)/self.prior_var\n elif self._prior == 'Cauchy':\n dimconst = (self.parameters.shape[0] + 1)/2.\n prior = dimconst*torch.log(self.prior_var + torch.sum(self.parameters ** 2))\n elif self._prior == 'Sparse':\n n = self.dataset.shape[1]\n gauss_prior = 0.5 * torch.sum(torch.exp(self.parameters[-1] * torch.exp(self.parameters[n:2*n]) * self.parameters[:n] ** 2))\n gamma_density = torch.distributions.Gamma(1.5,0.5)\n# gamma_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n# lambda_density = torch.distributions.Gamma(1.5,0.5)\n lambda_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n prior = gauss_prior + lambda_prior\n return prior", "def ScaleParameters(net, indToScale, delta_vector):\n i = 0\n for index, param in enumerate(net.parameters()):\n if index in indToScale:\n l1Norm = torch.sum(torch.abs(param.data), dim =1)\n rows, _ = param.data.size()\n with torch.no_grad():\n for j in range(int(rows)):\n param.data[j,:] *= delta_vector[i]/l1Norm[j]\n i += 1", "def scale_together(data, comp):\n scales = []\n guess = 1.\n s = opt.minimize(sq_residuals_in_range, guess, args = (data, comp), \n method = 'Nelder-Mead').x\n return s", "def _scale_to_hyperparam(self, scale):\n\n # If logscale is used, output hyperparam is log of scale.\n if self.use_log_scale:\n hyperparam = numpy.log10(numpy.abs(scale))\n else:\n hyperparam = numpy.abs(scale)\n\n return hyperparam", "def unscale(self,bvp):\n sol = bvp.solution\n # Additional aux entries for initial and terminal BCs\n extras = [{'type':'initial','vars':self.problem_data['state_list']},\n {'type':'terminal','vars':self.problem_data['state_list']}]\n\n # Scale the states and costates\n for idx,state in enumerate(self.problem_data['state_list']):\n sol.y[idx,:] *= self.scale_vals['states'][state]\n\n # Scale auxiliary variables\n for aux in (self.problem_data['aux_list']+extras):\n if aux['type'] not in Scaling.excluded_aux:\n for var in aux['vars']:\n sol.aux[aux['type']][var] *= self.scale_vals[aux['type']][var]\n\n # Scale parameters\n for idx, param in 
enumerate(self.problem_data['parameter_list']):\n sol.parameters[idx] *= self.scale_vals['parameters'][param]", "def scaled_sigmoid(self, x):\r\n return (tf.keras.backend.sigmoid(x) * 30 - 5)", "def scale(x, feature_range=(-1, 1)):\n \n # scale from 0-1 to feature_range\n min, max = feature_range\n #x = x * (max - min) + min\n #x = torch.add(torch.mul(x, (max-min)), min)\n x = x.mul(max-min).add_(min)\n return x", "def simple_scaling(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the minimum values of the input numpy array along the axis \n Max = np.max(input_data, axis = 0)\n\n # Simple sclaing \n scaled_input_data = input_data / (Max + sys.float_info.min)\n\n # Return scaled input data\n return scaled_input_data", "def build_model(self, parser=None, surrogate_posterior=None, prior=None, likelihood=None, scaling_model=None, mc_sample_size=None):\n from careless.models.merging.surrogate_posteriors import TruncatedNormal\n from careless.models.merging.variational import VariationalMergingModel\n from careless.models.scaling.image import HybridImageScaler,ImageScaler\n from careless.models.scaling.nn import MLPScaler\n if parser is None:\n parser = self.parser\n if parser is None:\n raise ValueError(\"No parser supplied, but self.parser is unset\")\n\n if parser.type == 'poly':\n from careless.models.likelihoods.laue import NormalLikelihood,StudentTLikelihood\n elif parser.type == 'mono':\n from careless.models.likelihoods.mono import NormalLikelihood,StudentTLikelihood\n\n if prior is None:\n prior = self.get_wilson_prior(parser.wilson_prior_b)\n loc,scale = prior.mean(),prior.stddev()/10.\n low = (1e-32 * self.asu_collection.centric).astype('float32')\n if surrogate_posterior is None:\n surrogate_posterior = TruncatedNormal.from_loc_and_scale(loc, scale, low)\n\n if likelihood is None:\n dof = parser.studentt_likelihood_dof\n if dof is None:\n likelihood = NormalLikelihood()\n else:\n likelihood = StudentTLikelihood(dof)\n\n if scaling_model is None:\n mlp_width = parser.mlp_width\n if mlp_width is None:\n mlp_width = BaseModel.get_metadata(self.inputs).shape[-1]\n\n mlp_scaler = MLPScaler(parser.mlp_layers, mlp_width)\n if parser.use_image_scales:\n n_images = np.max(BaseModel.get_image_id(self.inputs)) + 1\n image_scaler = ImageScaler(n_images)\n scaling_model = HybridImageScaler(mlp_scaler, image_scaler)\n else:\n scaling_model = mlp_scaler\n\n model = VariationalMergingModel(surrogate_posterior, prior, likelihood, scaling_model, parser.mc_samples)\n\n opt = tf.keras.optimizers.Adam(\n parser.learning_rate,\n parser.beta_1,\n parser.beta_2,\n )\n\n model.compile(opt)\n return model", "def normalize_bounds(self, bounds):\n scaled_bounds = []\n scalings = []\n intercepts = []\n \n non_fixed_params = []\n \n print(self.device)\n \n for name, domain in self.bounds.items():\n # Get any fixed parmeters\n if type(domain) == int or type(domain) == float:\n # Take note\n self.fixed_parameters.append(name)\n\n # Free parameters\n elif type(domain) == tuple:\n # Bookkeeping\n self.free_parameters.append(name)\n\n # Get scaling\n lower_bound = min(domain)\n upper_bound = max(domain)\n scale = upper_bound - lower_bound\n\n # Transform to [0, 1] domain\n #scaled_bound = {'name': name, 'type': 'continuous', 'domain': (0., 1.)} #torch.adjustment required\n non_fixed_params.append(name)\n \n # Store\n #scaled_bounds.append(scaled_bound)\n scalings.append(scale)\n intercepts.append(lower_bound)\n else:\n raise 
ValueError(\"Domain bounds not understood\")\n \n n_hyperparams = len(non_fixed_params)\n \n scaled_bounds = cat([zeros(1,n_hyperparams, device = self.device), \n ones(1, n_hyperparams, device = self.device)], 0)\n return scaled_bounds, tensor(scalings, device = self.device, requires_grad = False), tensor(intercepts, device = self.device, requires_grad = False) #torch.adjustment required", "def add_prior(self, prior):\n if self.rate_variation:\n # Gamma prior with mean 1 over all mutation rates\n sub_prior = ET.SubElement(prior, \"prior\", {\"id\":\"featureClockRatePrior.s:%s\" % self.name, \"name\":\"distribution\"})\n compound = ET.SubElement(sub_prior, \"input\", {\"id\":\"featureClockRateCompound:%s\" % self.name, \"spec\":\"beast.core.parameter.CompoundValuable\", \"name\":\"x\"})\n plate = ET.SubElement(compound, \"plate\", {\n \"var\":\"feature\",\n \"range\":\",\".join(self.features)})\n ET.SubElement(plate, \"var\", {\n \"idref\":\"featureClockRate:%s:$(feature)\" % self.name})\n gamma = ET.SubElement(sub_prior, \"input\", {\"id\":\"featureClockRatePriorGamma:%s\" % self.name, \"spec\":\"beast.math.distributions.Gamma\", \"name\":\"distr\", \"alpha\":\"@featureClockRateGammaShape:%s\" % self.name, \"beta\":\"@featureClockRateGammaScale:%s\" % self.name})\n # Exponential hyperprior on scale of Gamma prior\n # Exponential prior favours small scales over large scales, i.e. less rate variation\n # Mean scale 0.23 chosen for general sensibility, e.g.:\n # - Prior distribution is roughly 50/50 that ratio of fastest\n # to slowest feature rate in a dataset of size 200 is below\n # or above 10.\n # - Prior probability of roughly 0.90 that this ratio is below\n # 100.\n sub_prior = ET.SubElement(prior, \"prior\", {\"id\":\"featureClockRateGammaScalePrior.s:%s\" % self.name, \"name\":\"distribution\", \"x\":\"@featureClockRateGammaScale:%s\" % self.name})\n ET.SubElement(sub_prior, \"Exponential\", {\"id\":\"featureClockRateGammaShapePriorExponential.s:%s\" % self.name, \"mean\":\"0.23\", \"name\":\"distr\"})", "def predict(self, u=0):\n\n self.predict_x(u)\n \n W = np.concatenate((dot(self.F, self.U), self.Uq), axis=1)\n D = np.concatenate((self.D, self.Dq))\n self.U, self.D = mwgs(W, D)\n\n # save prior\n self.x_prior = np.copy(self.x)\n self.U_prior = np.copy(self.U)\n self.D_prior = np.copy(self.D)", "def scale(original_train, new_train):\n # find magnitude original training data\n o_mag = np.linalg.norm(np.stack(original_train[:,1]))\n # find magnitude new data\n n_mag = np.linalg.norm(np.stack(new_train[:,1]))\n # scale new data\n scale = o_mag / n_mag\n return scale", "def scale(self, alpha):\n coeff_scaled = [c*alpha for c in self.coeff]\n C = ExplicitIntegrator(coeff_scaled, self.updates)\n return C", "def smooth_activation(layer_norm, linear, activation_scales):\n if not isinstance(linear.weight, np.ndarray):\n linear_weight = linear.weight.numpy()\n activation_scales = activation_scales.numpy()\n else:\n linear_weight = linear.weight\n\n weight_scales = np.amax(np.absolute(linear_weight), axis=0)\n weight_scales = np.maximum(weight_scales, 1e-5)\n\n activation_scales = activation_scales.astype(weight_scales.dtype)\n\n scales = np.sqrt(activation_scales / weight_scales)\n scales = np.maximum(scales, 1e-5)\n\n if not isinstance(linear.weight, np.ndarray):\n import torch\n\n scales = torch.from_numpy(scales)\n\n layer_norm.gamma /= scales\n layer_norm.beta /= scales\n\n linear.weight *= scales.reshape(1, -1)", "def _scale_weights(self, max_weight):\n scale_factor = np.divide(1, 
max_weight)\n for exp in self.experts:\n exp.weight = exp.weight * scale_factor", "def transformPriorErrorCovariance(self):\n U_a, w_a, V_aT = np.linalg.linalg.svd(self.model.priorCovariance, \n full_matrices=False)\n V_a = V_aT.T\n self.priorSinvh = V_a * np.matrix(np.diag(np.sqrt(1.0/w_a))) * U_a.T\n self.priorSh = U_a * np.matrix(np.diag(np.sqrt(w_a))) * V_aT\n self.priorSinv = V_a * np.matrix(np.diag(1.0/w_a)) * U_a.T", "def normalized(self) -> 'NormalizedFXParam':\r\n ...", "def _para_boosting(self, H):\n # print '----------------primal-dual boost-------------------'\n H = np.hstack((H, -H))\n # H_ft = np.asfortranarray((H.copy()))\n (n, p) = H.shape\n self.c = np.log(n*p)\n nu = int(n * self.ratio)\n\n if self.max_iter < 50:\n delta = 1\n else:\n delta = 40\n d = np.ones(n) / n\n d_bar = np.ones(n) / n\n a_bar = np.ones(p) / p\n a = np.ones(p) / p\n h_a = np.sum(H, axis=1) / p\n h_a_bar = h_a.copy()\n # a_bar = a\n # a_tilde = np.ones(p) / p\n h_a_tilde = h_a.copy()\n # d_tilde = np.zeros(p)\n theta = 1\n sig = 1\n tau = 1\n t = 0\n logscale = 0\n for t in range(self.max_iter):\n d = prox_mapping(h_a_tilde, d, tau, 2)\n if self.has_dcap:\n d2 = proj_cap_ent(d, 1.0 / nu)\n # d_new = d_new/d_new.sum()\n if np.abs(d.sum() - d2.sum()) > 0.0001:\n print 'error'\n d = d2\n d_tilde = d\n dtH = np.dot(d_tilde, H)\n # dtH = np.dot(H.T, d_tilde)\n a_new = prox_mapping(-dtH, a, sig, 2)\n h_a_new = np.dot(H, a_new)\n # a_tilde = a_new + theta * (a_new - a)\n h_a_tilde = (1+theta) * h_a_new - theta * h_a\n a = a_new\n h_a = h_a_new\n d_bar *= t / (t + 1.0)\n d_bar += 1.0 / (t + 1) * d\n a_bar *= t / (t + 1.0)\n a_bar += 1.0 / (t + 1) * a\n # h_a_bar = np.dot(H, a_bar)\n h_a_bar = t / (t + 1.0) * h_a_bar + 1.0/(t+1) * h_a\n if int(np.log(t+1)) == logscale:\n logscale += 1\n self.iter_num.append(t)\n if self.has_dcap:\n min_margin = ksmallest2(h_a_bar, nu)\n self.primal_obj.append(-np.mean(min_margin))\n else:\n self.primal_obj.append(- np.min(h_a_bar))\n self.margin.append(-self.primal_obj[-1])\n self.dual_obj.append(-np.max(np.dot(d_bar, H)))\n self.gap.append(self.primal_obj[-1] - self.dual_obj[-1])\n self.err_tr.append(np.mean(h_a_bar < 0))\n # if t % 100 == 0:\n # print 'iter ' + str(t) + ' ' + str(self.gap[-1])\n if self.gap[-1] < self.epsi:\n break\n self.alpha = a_bar[:p / 2] - a_bar[p / 2:]\n self.d = d_bar\n print \" pd-boosting(python): max iter#%d: , actual iter#%d\" % (self.max_iter, t)", "def scale(self, value):\n\t\toldscale = self.oldmax - self.oldmin\n\t\tnewscale = self.newmax - self.newmin\n\t\treturn (newscale * (value - self.oldmin) / oldscale) + self.newmin", "def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def rescale(self):\n # forecast on real data, don't need this anymore\n pass", "def nscale_forward(self, x_1x, scales):\n assert 1.0 in scales, 'expected 1.0 to be the target scale'\n # Lower resolution provides attention for higher rez predictions,\n # so we evaluate in order: high to low\n scales = sorted(scales, reverse=True)\n\n pred = None\n\n for s in scales:\n x = nn.functional.interpolate(\n x_1x,\n scale_factor=s,\n align_corners=self.align_corners,\n mode='bilinear')\n outs = self.single_scale_forward(x)\n\n cls_out = outs['cls_out']\n attn_out = outs['logit_attn']\n\n if pred is None:\n pred = cls_out\n elif s >= 1.0:\n # downscale previous\n pred = scale_as(pred, cls_out, self.align_corners)\n pred = cls_out * attn_out + pred 
* (1 - attn_out)\n else:\n # s < 1.0: upscale current\n cls_out = cls_out * attn_out\n\n cls_out = scale_as(cls_out, pred, self.align_corners)\n attn_out = scale_as(attn_out, pred, self.align_corners)\n\n pred = cls_out + pred * (1 - attn_out)\n\n return [pred]", "def buildZPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.z_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_pzgxw + T.log(self.hyper['num_clust'])), axis=3), axis=[1,2])\r\n\r\n self.z_prior_modif = - T.maximum(self.hyper['treshold_z_prior'], - self.z_prior)", "def scale_module(module, scale):\n for p in module.parameters():\n p.detach().mul_(scale)\n return module", "def __init__(self, prior: Prior):\n # TODO: Consider analytical solution rather than implementing optimisation\n super().__init__(prior.factor, x=prior, name=namer(self.__class__.__name__))\n self.prior = prior\n self.label = f\"PriorFactor({prior.label})\"", "def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return", "def normalizePk(c,sig8):\n sig2now = sigma2fromPk(c,8.)\n #print 'sig2now=',sig2now\n c.pk *= sig8**2/sig2now\n c.logpk = M.log(c.pk)\n\n c.cp.scalar_amp[0] = c.cp.scalar_amp[0] * sig8**2/sig2now[0] #inelegant tuple change\n\n # for scipy splines\n c.pkSplineCoeff = SS.cspline1d(c.logpk)\n return sig2now", "def __init__(self, encoder_size, decoder_size, label_size):\n super(BilinearAttention, self).__init__()\n self.W = nn.Parameter(torch.zeros(label_size, decoder_size, encoder_size))\n self.u = nn.Parameter(torch.zeros(label_size, encoder_size))\n self.v = nn.Parameter(torch.zeros(label_size, decoder_size))\n self.b = nn.Parameter(torch.zeros(label_size))\n \n nn.init.xavier_uniform_(self.W)\n nn.init.xavier_uniform_(self.u)\n nn.init.xavier_uniform_(self.v)", "def kl_div_prior_gradient(self, posterior_logits, posterior_binary_samples):\n #DVAE Eq11 - gradient of prior\n #gradient of the KLD between posterior and prior wrt to prior\n #parameters theta, i.e. generative model parameters.\n #logits to probabilities\n posterior_probs=torch.sigmoid(posterior_logits)\n positive_probs=posterior_probs.detach()\n \n #samples from posterior are labelled positive\n positive_samples=posterior_binary_samples.detach()\n\n n_split=positive_samples.size()[1]//2\n positive_samples_left,positive_samples_right=torch.split(positive_samples,split_size_or_sections=int(n_split),dim=1)\n \n #-z_left^t J z_right\n pos_first_term=torch.matmul(positive_samples_left,self.prior.get_weights())*positive_samples_right\n \n rbm_bias_left=self.prior.get_visible_bias()\n rbm_bias_right=self.prior.get_hidden_bias()\n rbm_bias=torch.cat([rbm_bias_left,rbm_bias_right])#self._h\n \n #this gives [42,400] size\n #- z^t h\n #TODO this uses positive probs. 
Should it not use positive samples?\n # FIXME an indication are the negative ones where samples are used! On\n #other hand this is the only place this this used\n pos_sec_term=positive_probs*rbm_bias\n # pos_sec_term=positive_samples*rbm_bias\n\n # Energy = -z_left^t J z_right - z^t h\n pos_kld_per_sample=-(torch.sum(pos_first_term,axis=1)+torch.sum(pos_sec_term,axis=1))\n #samples from rbm are labelled negative\n\n #rbm_samples Tensor(\"zeros:0\", shape=(200, 200), dtype=float32)\n #this returns the full RBM set: left and right nodes concatenated\n\n #TODO What are these samples here?\n #TODO what's the impact of doing gibbs sampling here? does this make\n #sense?\n rbm_samples=self.prior.get_samples_kld(approx_post_samples=positive_samples_left,n_gibbs_sampling_steps=1)\n negative_samples=rbm_samples.detach()\n\n # print(self.prior.get_weights())\n n_split=negative_samples.size()[1]//2\n negative_samples_left,negative_samples_right=torch.split(negative_samples,split_size_or_sections=int(n_split),dim=1)\n neg_first_term=torch.matmul(negative_samples_left,self.prior.get_weights())*negative_samples_right\n \n #FIXME see above, the positive case looks different. Why?\n neg_sec_term=negative_samples*rbm_bias\n neg_kld_per_sample=(torch.sum(neg_first_term,axis=1)+torch.sum(neg_sec_term,axis=1))\n \n kld_per_sample=pos_kld_per_sample+neg_kld_per_sample\n\n return kld_per_sample", "def __init__(self) -> None:\n self.name = \"minmaxScaler\"\n self.min = 0\n self.max = 0" ]
[ "0.62483156", "0.62137103", "0.6170398", "0.6155527", "0.6027734", "0.59788775", "0.5940418", "0.5933784", "0.5926682", "0.59190893", "0.5883833", "0.5880828", "0.58511233", "0.58176", "0.580629", "0.57443565", "0.57379764", "0.56918335", "0.56906885", "0.5651347", "0.56245714", "0.5566006", "0.5555777", "0.55522466", "0.5531442", "0.5529287", "0.5525406", "0.55097723", "0.55077803", "0.5507342", "0.55071104", "0.55015635", "0.54995644", "0.5486924", "0.5455957", "0.5441944", "0.54411495", "0.54333293", "0.5431112", "0.5412896", "0.5402067", "0.53863436", "0.53807265", "0.5377289", "0.53710955", "0.5364864", "0.5363325", "0.5356801", "0.5352587", "0.535089", "0.534178", "0.53375626", "0.53328145", "0.5327596", "0.53087896", "0.53018546", "0.5297434", "0.5289481", "0.5286531", "0.5283323", "0.5276735", "0.52751285", "0.52726513", "0.5265422", "0.5264382", "0.52618194", "0.52480155", "0.52444607", "0.5243103", "0.5235535", "0.5235063", "0.5231196", "0.5230933", "0.52281696", "0.52280366", "0.52236795", "0.5220341", "0.5209582", "0.5209339", "0.52088267", "0.5205222", "0.5200938", "0.5195891", "0.5194659", "0.5183059", "0.5182066", "0.51786953", "0.5175781", "0.51735646", "0.5171454", "0.51682025", "0.51623577", "0.5159956", "0.5159089", "0.51541775", "0.5153895", "0.51514137", "0.5151361", "0.51503897", "0.5149665" ]
0.5472967
34
r"""Scale Hyperprior with non zeromean Gaussian conditionals from D.
def mbt2018_mean(quality, metric="mse", pretrained=False, progress=True, **kwargs):
    if metric not in ("mse", "ms-ssim"):
        raise ValueError(f'Invalid metric "{metric}"')
    if quality < 1 or quality > 8:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
    return _load_model("mbt2018-mean", metric, quality, pretrained, progress, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def _likelihood_der1_scale(self, hyperparam):\n\n # Get eta\n eta = self._hyperparam_to_eta(hyperparam)\n\n # Set scale of the covariance object\n scale = self._hyperparam_to_scale(hyperparam[self.scale_index:])\n self.mixed_cor.set_scale(scale)\n\n # Initialize jacobian\n dell_dscale = numpy.zeros((scale.size, ), dtype=float)\n\n # Update Y, C, Mz\n self._update_Y_C_Mz(hyperparam)\n\n # Find optimal sigma2\n sigma2 = self._find_optimal_sigma2(hyperparam)\n\n # Compute (or update) Kninv and KnpKninv\n if not self.stochastic_traceinv:\n self._update_Kninv_KnpKninv(hyperparam)\n\n # Knp is the derivative of mixed_cor (Kn) w.r.t p-th element of scale.\n for p in range(scale.size):\n\n if self.stochastic_traceinv:\n # Compute traceinv using stochastic estimation method. Note\n # that since Knp is not positive-definite, we cannot use\n # Cholesky method in imate. The only viable option is\n # Hutchinson's method.\n Knp = self.mixed_cor.get_matrix(eta, derivative=[p])\n trace_KnpKninv = self.mixed_cor.traceinv(\n eta, B=Knp, imate_options={'method': 'hutchinson'})\n else:\n trace_KnpKninv = imate.trace(self.KnpKninv[p], method='exact')\n\n # Compute the second component of trace of Knp * M\n KnpY = self.mixed_cor.dot(self.Y, eta=eta, derivative=[p])\n YtKnpY = numpy.matmul(self.Y.T, KnpY)\n CYtKnpY = numpy.matmul(self.C, YtKnpY)\n trace_CYtKnpY = numpy.trace(CYtKnpY)\n\n # Compute trace of Knp * M\n trace_KnpM = trace_KnpKninv - trace_CYtKnpY\n\n # Compute zMKnpMz\n KnpMz = self.mixed_cor.dot(self.Mz, eta=eta, derivative=[p])\n zMKnpMz = numpy.dot(self.Mz, KnpMz)\n\n # Derivative of ell w.r.t p-th element of distance scale\n dell_dscale[p] = -0.5*trace_KnpM + 0.5*zMKnpMz / sigma2\n\n return dell_dscale", "def gauss1d(sigma=1,order=0,nstd=3,x=np.empty((0,)),normalize=True):\n\tassert sigma>0, \"sigma cannot be equal to zero\"\n\n\tx_max = nstd * sigma\n\tif x.size==0:\n\t\tx = np.arange(-x_max,x_max+1)\n\tvar = sigma**2\n\tnum = x * x\n\tden = 2 * var\n\tg = np.exp(-num/den) / (np.sqrt(2*np.pi)*sigma)\n\tif order==1:\n\t\tg *= -x/var\n\telif order==2:\n\t\tg *= (num-var)/var**2\n\tif normalize:\n\t\t# return g / np.linalg.norm(g,1)\n\t\treturn g / g.sum()\n\telse:\n\t\treturn g", "def normal_distr(x, mu, sigma, s=1):\n \n return s * 1/(sigma * torch.sqrt(torch.tensor(2 * np.pi))) * torch.exp((-1/2) * ((x - mu) / sigma) ** 2)", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n\n return g.unsqueeze(0).unsqueeze(0)", "def _likelihood_der2_scale(self, hyperparam):\n\n # Get eta\n eta = self._hyperparam_to_eta(hyperparam)\n\n # Set scale of the covariance object\n scale = self._hyperparam_to_scale(hyperparam[self.scale_index:])\n self.mixed_cor.set_scale(scale)\n\n # Initialize Hessian\n d2ell_dscale2 = numpy.zeros((scale.size, scale.size), dtype=float)\n\n # Update Y, C, Mz\n self._update_Y_C_Mz(hyperparam)\n\n # Find optimal sigma2\n sigma2 = self._find_optimal_sigma2(hyperparam)\n\n # Compute (or update) Kninv and KnpKninv\n if not self.stochastic_traceinv:\n self._update_Kninv_KnpKninv(hyperparam)\n\n # Knp is the derivative of mixed_cor (Kn) w.r.t p-th element of scale.\n for p in range(scale.size):\n\n KnpMz = self.mixed_cor.dot(self.Mz, eta=eta, derivative=[p])\n MKnpMz = self.M_dot(self.C, self.Y, eta, KnpMz)\n\n for q in range(scale.size):\n\n # 1. 
Compute zMKnqMKnpMz\n if p == q:\n KnqMz = KnpMz\n else:\n KnqMz = self.mixed_cor.dot(self.Mz, eta=eta,\n derivative=[q])\n zMKnqMKnpMz = numpy.dot(KnqMz, MKnpMz)\n\n # 2. Compute zMKnpqMz\n KnpqMz = self.mixed_cor.dot(self.Mz, eta=eta,\n derivative=[p, q])\n zMKnpqMz = numpy.dot(self.Mz, KnpqMz)\n\n # 3. Computing trace of Knpq * M in three steps\n\n # Compute the first component of trace of Knpq * Kninv\n Knpq = self.mixed_cor.get_matrix(eta, derivative=[p, q])\n if self.stochastic_traceinv:\n trace_KnpqKninv = self.mixed_cor.traceinv(\n eta, B=Knpq,\n imate_options={'method': 'hutchinson'})\n else:\n KnpqKninv = Knpq @ self.Kninv\n trace_KnpqKninv = imate.trace(KnpqKninv, method='exact')\n\n # Compute the second component of trace of Knpq * M\n KnpqY = self.mixed_cor.dot(self.Y, eta=eta, derivative=[p, q])\n YtKnpqY = numpy.matmul(self.Y.T, KnpqY)\n CYtKnpqY = numpy.matmul(self.C, YtKnpqY)\n trace_CYtKnpqY = numpy.trace(CYtKnpqY)\n\n # Compute trace of Knpq * M\n trace_KnpqM = trace_KnpqKninv - trace_CYtKnpqY\n\n # 4. Compute trace of Knp * M * Knq * M\n\n # Compute first part of trace of Knp * M * Knq * M\n Knp = self.mixed_cor.get_matrix(eta, derivative=[p])\n Knq = self.mixed_cor.get_matrix(eta, derivative=[q])\n if self.stochastic_traceinv:\n trace_KnpMKnqM_1 = self.mixed_cor.traceinv(\n eta, B=Knq, C=Knp,\n imate_options={'method': 'hutchinson'})\n else:\n KnpKninvKnqKninv = numpy.matmul(self.KnpKninv[p],\n self.KnpKninv[q])\n trace_KnpMKnqM_1 = imate.trace(KnpKninvKnqKninv,\n method='exact')\n\n # Compute the second part of trace of Knp * M * Knq * M\n KnpY = Knp @ self.Y\n if p == q:\n KnqY = KnpY\n else:\n KnqY = Knq @ self.Y\n KninvKnqY = self.mixed_cor.solve(KnqY, eta=eta)\n YtKnpKninvKnqY = numpy.matmul(KnpY.T, KninvKnqY)\n F21 = numpy.matmul(self.C, YtKnpKninvKnqY)\n F22 = numpy.matmul(self.C, YtKnpKninvKnqY.T)\n trace_KnpMKnqM_21 = numpy.trace(F21)\n trace_KnpMKnqM_22 = numpy.trace(F22)\n\n # Compute the third part of trace of Knp * M * Knq * M\n YtKnpY = numpy.matmul(self.Y.T, KnpY)\n if p == q:\n YtKnqY = YtKnpY\n else:\n YtKnqY = numpy.matmul(self.Y.T, KnqY)\n Dp = numpy.matmul(self.C, YtKnpY)\n if p == q:\n Dq = Dp\n else:\n Dq = numpy.matmul(self.C, YtKnqY)\n D = numpy.matmul(Dp, Dq)\n trace_KnpMKnqM_3 = numpy.trace(D)\n\n # Compute trace of Knp * M * Knq * M\n trace_KnpMKnqM = trace_KnpMKnqM_1 - trace_KnpMKnqM_21 - \\\n trace_KnpMKnqM_22 + trace_KnpMKnqM_3\n\n # 5. 
Second \"local\" derivatives w.r.t scale\n local_d2ell_dscale2 = -0.5*trace_KnpqM + 0.5*trace_KnpMKnqM + \\\n (0.5*zMKnpqMz - zMKnqMKnpMz) / sigma2\n\n # Computing total second derivative\n dp_log_sigma2 = -numpy.dot(self.Mz, KnpMz) / \\\n (self.rdof*sigma2)\n if p == q:\n dq_log_sigma2 = dp_log_sigma2\n else:\n dq_log_sigma2 = -numpy.dot(self.Mz, KnqMz) / \\\n (self.rdof*sigma2)\n d2ell_dscale2[p, q] = local_d2ell_dscale2 + \\\n 0.5 * self.rdof * dp_log_sigma2 * dq_log_sigma2\n\n if p != q:\n d2ell_dscale2[q, p] = d2ell_dscale2[p, q]\n\n return d2ell_dscale2", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n return g.reshape(-1)", "def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return", "def der_gauss(shape,sigma):\r\n m,n = [(ss-1.)/2. for ss in shape]\r\n nx, ny = shape\r\n xv = np.linspace(-1, 1, nx)\r\n yv = np.linspace(-1, 1, ny)\r\n x1, y1 = np.meshgrid(xv, yv)\r\n \r\n \r\n h = np.exp( -(x1*x1 + y1*y1) / (2.*sigma*sigma) )\r\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\r\n sumh = h.sum()\r\n if sumh != 0:\r\n h /= sumh\r\n gauss_x = -x1/sigma*sigma\r\n h1 = gauss_x * h\r\n return h1", "def scaled_init_method_normal(sigma, num_layers):\n std = sigma / math.sqrt(2.0 * num_layers)\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=std)\n\n return init_", "def scaled_init_method_normal(sigma, num_layers):\n std = sigma / math.sqrt(2.0 * num_layers)\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=std)\n\n return init_", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def buildZPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.z_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_pzgxw + T.log(self.hyper['num_clust'])), axis=3), axis=[1,2])\r\n\r\n self.z_prior_modif = - T.maximum(self.hyper['treshold_z_prior'], - self.z_prior)", "def prior(self, batch_size: int = 1) -> Distribution:\n prior_params = self.prior_params.expand(\n batch_size, *self.prior_params.shape[-1:]\n )\n mu, log_sigma = prior_params.chunk(2, dim=-1)\n\n # return the distribution `p(z)`\n return Normal(mu, log_sigma.exp())", "def onedgauss(x,H,A,dx,w):\n #H,A,dx,w = params\n return H+A*np.exp(-(x-dx)**2/(2*w**2))", "def make_conditional_density(bgm_fit, threshold, sigma, width):\n pass", "def 
posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)", "def init_weights(self, t, prior, scaled_prior = False):\n from scipy.cluster.vq import kmeans2\n from scipy.spatial.distance import cdist\n \n # check if t has the right shape\n if not t.ndim == 2:\n t = t[:, None]\n \n if scaled_prior:\n #self.w1 = np.random.normal(loc=0.0, scale = 1,size=[H, d+1])/np.sqrt(d+1) # 1st layer weights + bias\n #self.w2 = np.random.normal(loc=0.0, scale = 1,size=[ny, H+1])/np.sqrt(H+1) # 2nd layer weights + bias\n sigma1 = 1.0/np.sqrt(self.d+1)\n sigma2 = 1.0/np.sqrt(self.H+1)\n else:\n # init weights from gaussian with width given by prior\n sigma1 = 1.0/np.sqrt(prior)\n sigma2 = sigma1\n \n self.w1 = np.random.normal(loc=0.0, scale = 1,size=[self.H, self.d+1]) * sigma1 # 1st layer weights + bias\n self.w2 = np.random.normal(loc=0.0, scale = 1,size=[self.ny, self.H+1]) * sigma2 # 2nd layer weights + bias\n \n # init biases (taken from netlab, gmminit.m)\n [centroid, label] = kmeans2(t, self.M)\n cluster_sizes = np.maximum(np.bincount(label), 1) # avoid empty clusters\n alpha = cluster_sizes/np.sum(cluster_sizes)\n if (self.M > 1):\n # estimate variance from the distance to the nearest centre\n sigma = cdist(centroid, centroid)\n sigma = np.min(sigma + np.diag(np.diag(np.ones(sigma.shape))) * 1000, 1)\n sigma = np.maximum(sigma, np.finfo(float).eps) # avoid underflow\n else:\n # only one centre: take average variance\n sigma = np.mean(np.diag([np.var(t)]))\n # set biases, taken from netlab, mdninit.m\n self.w2[0:self.M,0] = alpha\n self.w2[self.M:2*self.M,0] = np.log(sigma)\n self.w2[2*self.M:,0] = np.reshape(centroid, [self.M * self.c])", "def _prior_gaussian(self, x_start):\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.gaussian_q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)", "def setupMixedPrior(self):\n\n if self.namePrior.find('mixed') < 0:\n return\n\n # we set up the default parameters for bounded flat prior,\n # then update them with non-flat examples\n if np.size(self.hyper) < 7:\n self.setupDefaultPars()\n\n # Adjust the hyperparameters for defaults.\n self.hyper[0][2] = 0.45\n self.hyper[1][2] = 0.05\n 
self.hyper[0][3] = 16.3\n self.hyper[1][3] = 0.1\n\n nMeths = np.shape(self.hyper)[-1]\n self.mixedNames = ['binaryBoundedOne' for i in range(nMeths)]\n\n ### Let's try some gaussians. Eccentricity and period\n self.mixedNames[2] = 'gaussianOne'\n self.mixedNames[3] = 'gaussianOne'\n\n self.findMixedMethods()", "def GaussianPosteriorSample(bs, ls) :\n def gps(args) :\n mu, log_var = args\n eps = K.random_normal(shape=(bs, ls), mean=0.0, stddev=1.0) # 10 x 2\n return mu + K.exp(log_var / 2.) * eps\n return gps", "def initiategaussian(sd, x0):\n y = np.exp(-x**2/(2*sd**2))\n return y", "def set_gaussian(self, X, sigma=0):\n sigma = float(sigma)\n if sigma < 0:\n raise ValueError('sigma should be positive')\n self.set_euclidian(X)\n d = self.weights\n\n if sigma == 0:\n sigma = (d ** 2).mean()\n\n w = np.exp(- (d ** 2) / (2 * sigma))\n self.weights = w", "def gauss_hermite(dim=1, num_quad_pts=20):\n # sigma_pts, weights = hermgauss(num_quad_pts) # Gauss-Hermite sigma points and weights\n sigma_pts, weights = mvhermgauss(num_quad_pts, dim)\n sigma_pts = np.sqrt(2) * sigma_pts.T\n weights = weights.T * pi ** (-0.5 * dim) # scale weights by 1/√π\n return sigma_pts, weights", "def cmdscale(D):\n # Number of points \n n = len(D)\n # Centering matrix \n H = np.eye(n) - np.ones((n, n))/n\n # YY^T \n B = -H.dot(D**2).dot(H)/2\n # Diagonalize \n evals, evecs = np.linalg.eigh(B)\n # Sort by eigenvalue in descending order \n idx = np.argsort(evals)[::-1]\n evals = evals[idx]\n evecs = evecs[:,idx]\n # Compute the coordinates using positive-eigenvalued components only \n w, = np.where(evals > 0)\n L = np.diag(np.sqrt(evals[w]))\n V = evecs[:,w]\n Y = V.dot(L)\n return Y, evals", "def normalize(X, mu, sigma):\n return (X - mu) / sigma", "def convertHermiteToNormal(self,x):\n return self.sigma*x+self.untruncatedMean()", "def mvhermgauss(H, D, dtype=torch.float32):\n gh_x, gh_w = hermgauss(H)\n x = numpy.array(numpy.meshgrid(*(D*(gh_x,))))\n w = numpy.array(numpy.meshgrid(*(D*(gh_w,)))).prod(1)\n x, w = torch.as_tensor(x, dtype=dtype), torch.as_tensor(w, dtype=dtype)\n return x, w", "def cmdscale(D):\n # Number of points\n n = len(D)\n\n # Centering matrix\n H = np.eye(n) - np.ones((n, n))/n\n\n # YY^T\n B = -H.dot(D**2).dot(H)/2\n\n # Diagonalize\n evals, evecs = np.linalg.eigh(B)\n\n # Sort by eigenvalue in descending order\n idx = np.argsort(evals)[::-1]\n evals = evals[idx]\n evecs = evecs[:, idx]\n\n # Compute the coordinates using positive-eigenvalued components only\n w, = np.where(evals > 0)\n L = np.diag(np.sqrt(evals[w]))\n V = evecs[:, w]\n Y = V.dot(L)\n\n return Y, evals[evals > 0]", "def gauss_kern(size, sigma=1.0):\n h1 = size[0]\n h2 = size[1]\n x, y = np.mgrid[0:h2, 0:h1]\n x = x-h2/2\n y = y-h1/2\n g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) );\n return g / g.sum()", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def posterior(mu, x, sigma):\n post = like(x, sigma, mu) * prior(mu)\n evidencia = np.trapz(post, mu)\n return post/evidencia", "def setPrior(self,xPrior,priorWeight):\n assert self.regularizationLambda == 0\n if not isinstance(xPrior,np.ndarray):\n xPrior = np.array(xPrior)\n self.count = 1\n self.sumWeight = priorWeight\n self.scale = 1\n self.AtA = np.eye(self.n)*priorWeight\n self.AtAinv = np.eye(self.n)/priorWeight\n self.Atb = xPrior*priorWeight\n self.btb = np.dot(xPrior,xPrior)*priorWeight\n self.degenerate = False\n self.x = xPrior", "def 
_denormalizeState(self, Z : vector) -> vector:\n return Z / self.D", "def gauss_kern(sigma, size):\r\n size = int(np.floor(size/2))\r\n sizey = size\r\n x, y = scipy.mgrid[-size:size+1, -sizey:sizey+1]\r\n g = scipy.exp(-(x**2+y**2) / (2*(sigma)**2))\r\n return np.ravel(g / g.max())", "def normal_lower_bound(probability: float, mu: float = 0, sigma: float = 1) -> float:\n return inverse_normal_cdf(1 - probability, mu, sigma)", "def growth_factor_scale_independent(self, z):\n z = np.atleast_1d(z)\n nz = len(z)\n #if self.M_nu_tot == 0. and self.w0 == -1. and self.wa==0.:\n # aa = 1./(1.+z)\n # ww = self.w0 + (1.-aa)*self.wa\n # d1 = aa*ss.hyp2f1(1/3., 1., 11/6., -aa**3/self.Omega_m*(1.-self.Omega_m))/ss.hyp2f1(1/3., 1., 11/6., -(1.-self.Omega_m)/self.Omega_m)\n #else:\n # d1 = np.zeros(nz)\n # for i in range(nz):\n # LCDM, _ = sint.quad(lambda x: (1+x)*(self.H0/self.H_massive(x))**3., z[i], np.inf)\n # d1[i] = LCDM*self.H_massive(z[i])/self.H0\n # LCDM0, _ = sint.quad(lambda x: (1+x)*(self.H0/self.H_massive(x))**3., 0., np.inf)\n # d1 = d1/LCDM0\n d1 = np.zeros(nz)\n for i in range(nz):\n LCDM, _ = sint.quad(lambda x: (1+x)*(self.H0/self.H_massive(x))**3., z[i], np.inf)\n d1[i] = LCDM*self.H_massive(z[i])/self.H0\n LCDM0, _ = sint.quad(lambda x: (1+x)*(self.H0/self.H_massive(x))**3., 0., np.inf)\n d1 = d1/LCDM0\n return d1", "def NORMAL_D(YNORM,Q,CMAN,B0,S,S0):\r\n \r\n if (Q < 0.):\r\n YNORM = 0.\r\n return\r\n \r\n C1 = (CMAN*Q)/np.sqrt(S0)\r\n C2 = 2*np.sqrt(1 + S*S)\r\n YNORM = (CMAN**2*(Q/B0)**2/S0)**0.3333\r\n for i in range(999):\r\n FY = AR(YNORM)*HR(YNORM)**0.6667 - C1\r\n DFDY = 1.6667*BW(YNORM)*HR(YNORM)**0.6667 - 0.6667*HR(YNORM)**1.6667*C2\r\n YNEW = YNORM - FY/DFDY\r\n ERR = abs((YNEW - YNORM)/YNEW)\r\n YNORM = YNEW.copy()\r\n if (ERR < 1.0E-06):\r\n return\r\n return", "def klucb_gauss(x, d, sig2=1., precision=0.):\n return x + sqrt(2*sig2*d)", "def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return", "def buildConditionalPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.conditional_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_qxgy.dimshuffle(0,'x',1,'x') - self.log_pxgzw), axis=3), axis=[1,2])", "def compute_posterior(prior, likelihood, y):\n\n # -------------------------------------------------------------------------\n # ERROR CHECKS -- DO NOT MODIFY\n #\n\n # check that prior probabilities sum to 1\n if np.abs(1 - np.sum(prior)) > 1e-06:\n exit('In compute_posterior: The prior probabilities need to sum to 1')\n\n # check that likelihood is specified as a 2D array\n if len(likelihood.shape) != 2:\n exit('In compute_posterior: The likelihood needs to be specified as ' +\n 'a 2D array')\n\n K, M = likelihood.shape\n\n # make sure likelihood and prior agree on number of hidden states\n if len(prior) != M:\n exit('In compute_posterior: Mismatch in number of hidden states ' +\n 'according to the prior and the likelihood.')\n\n # make sure the conditional distribution given each hidden state value sums\n # to 1\n for m in range(M):\n if np.abs(1 - np.sum(likelihood[:, m])) > 
1e-06:\n exit('In compute_posterior: P(Y | X = %d) does not sum to 1' % m)\n\n #\n # END OF ERROR CHECKS\n # -------------------------------------------------------------------------\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (b)\n #\n # Place your code to compute the log of the posterior here: store it in a\n # NumPy array called `log_answer`. If you exponentiate really small\n # numbers, the result is likely to underflow (i.e., it will be so small\n # that the computer will just make it 0 rather than storing the right\n # value). You need to go to log-domain. Hint: this next line is a good\n # first step.\n log_prior = np.log(prior)\n# print(log_prior)\n# print(likelihood)\n# print(y)\n unnormal = log_prior + np.log(likelihood[y,:]).sum(axis=0)\n# print(unnormal)\n log_answer = unnormal - scipy.misc.logsumexp(unnormal)\n# print(log_answer)\n\n #\n # END OF YOUR CODE FOR PART (b)\n # -------------------------------------------------------------------------\n\n # do not exponentiate before this step\n posterior = np.exp(log_answer)\n return posterior", "def posterior(self, x: Tensor) -> Distribution:\n # Compute the parameters of the posterior\n h_x = self.encoder(x)\n mu, log_sigma = h_x.chunk(2, dim=-1)\n\n # Return a distribution `q(z|x) = N(z | \\mu(x), \\sigma(x))`\n return Normal(mu, log_sigma.exp())", "def g_multivariate_normal(x,M):\n return .5*np.dot(x,M+M.T)", "def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)", "def normal_lower_bound(probability: float,\n mu: float = 0,\n sigma: float = 1) -> float:\n return inverse_normal_cdf(1 - probability, mu, sigma)", "def _multivariate_gaussian(self, x, mu_k, sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)", "def gauss_kern(sigma,h):\n h1 = h\n h2 = h\n x, y = np.mgrid[0:h2, 0:h1]\n x = x-h2/2\n y = y-h1/2\n # sigma = 10.0\n g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) )\n return g / g.sum()", "def deriv_sigmoid(self,z):\n return np.exp(-z) / ( (1 + np.exp(-z)) ** 2 )", "def __init__(self, dynamod, measmod, initrv, alpha, beta, kappa, **kwargs):\n if not issubclass(type(dynamod), DiscreteGaussianModel):\n raise ValueError(\n \"_DiscDiscUnscentedKalman requires \" \"a Gaussian dynamic model.\"\n )\n if not issubclass(type(measmod), DiscreteGaussianModel):\n raise ValueError(\n \"_DiscDiscUnscentedKalman requires \" \"a Gaussian measurement model.\"\n )\n super().__init__(dynamod, measmod, initrv)\n self.ut = UnscentedTransform(self.dynamod.dimension, alpha, beta, kappa)", "def normal_lower_bound(probability, mu=0, sigma=1):\r\n return ds_probability.inverse_normal_cdf(1 - probability, mu, sigma)", "def scale(X, *, axis=..., with_mean=..., with_std=..., copy=...):\n ...", "def scale(z,h=0.7,omegalambda=0.7,omegam=0.3,omegak=0.0):\n return distcalc(z,h,omegalambda,omegam,omegak)['scale']", "def init_method_normal(sigma):\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)\n\n return init_", "def init_method_normal(sigma):\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)\n\n return init_", "def normal_lower_bound(probability, mu=0, sigma=1):\n return inverse_normal_cdf(1- probability, mu, sigma)", "def scale_dt_d(self,dt_d,nu_i,nu_f,beta=KOLMOGOROV_BETA):\n # dt_d = make_quant(dt_d, 's')\n if beta < 4:\n exp = 
2.0/(beta-2) #(6.0/5)\n elif beta > 4:\n exp = float(beta-2)/(6-beta)\n return dt_d*(nu_f/nu_i)**exp", "def __init__(self, mean=0.0, sigma=1.0):\n super().__init__()\n self.mean = mean\n self.sigma = sigma\n self.hasInfiniteBound = True\n self.type = 'Normal'\n self.distType = 'Continuous'\n self.compatibleQuadrature.append('Hermite')\n self.compatibleQuadrature.append('CDF')\n #THESE get set in initializeDistribution, since it depends on truncation\n #self.preferredQuadrature = 'Hermite'\n #self.preferredPolynomials = 'Hermite'", "def convertNormalToHermite(self,y):\n return (y-self.untruncatedMean())/(self.sigma)", "def _normalizeState(self, Z : vector) -> vector:\n return Z * self.D", "def normal_lower_bound(probability, mu=0, sigma=1):\n return inverse_normal_cdf(1 - probability, mu, sigma)", "def normal_lower_bound(probability, mu=0, sigma=1):\n return inverse_normal_cdf(1 - probability, mu, sigma)", "def normal_lower_bound(probability, mu=0, sigma=1):\n return inverse_normal_cdf(1 - probability, mu, sigma)", "def normal_lower_bound(probability, mu=0, sigma=1):\n return inverse_normal_cdf(1 - probability, mu, sigma)", "def _FSpecialGauss(size, sigma):\n radius = size // 2\n offset = 0.0\n start, stop = -radius, radius + 1\n if size % 2 == 0:\n offset = 0.5\n stop -= 1\n x, y = np.mgrid[offset + start:stop, offset + start:stop]\n assert len(x) == size\n g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))\n return g / g.sum()", "def bandwidth(d):\n gz = 2 * gamma(0.5 * (d+1)) / gamma(0.5 * d)\n return 1. / (2. * gz**2)", "def zonotope_inside_scale(z,Y):\n model=Model(\"inside_scale\")\n n,N=Y.shape\n p=np.empty((z.G.shape[1],N),dtype='object')\n scale=model.addVar(obj=1)\n for row in range(p.shape[0]):\n for column in range(N):\n p[row,column]=model.addVar(lb=-GRB.INFINITY,ub=GRB.INFINITY)\n model.update()\n for row in range(p.shape[0]):\n for column in range(N):\n model.addConstr(p[row,column]<=scale)\n model.addConstr(-p[row,column]<=scale)\n constraints_AB_eq_CD(model,np.eye(n),Y-z.x,z.G,p)\n model.setParam('OutputFlag', 0)\n model.optimize()\n return scale.X", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= 
np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def standardize(X, axis=0, ddof=0):\n\n # Modified from scikit-learn.preprocessing.scale()!\n\n #X = np.asarray(X)\n X = np.asarray(X, dtype=np.float) # XXX: what about dtype? convert to float64? for higher precision? let client decide?\n Xr = np.rollaxis(X, axis) # view on X to enable broadcasting on the axis we are interested in\n \n mean_ = Xr.mean(axis=0)\n std_ = Xr.std(axis=0, ddof=ddof)\n std_[std_ == 0.0] = 1.0 # avoid NaNs due to div/zero\n\n # center mean on zero\n Xr -= mean_\n\n # Verify that mean_1 is 'close to zero'. If X contains very\n # large values, mean_1 can also be very large, due to a lack of\n # precision of mean_. In this case, a pre-scaling of the\n # concerned feature is efficient, for instance by its mean or\n # maximum.\n mean_1 = Xr.mean(axis=0)\n if not np.allclose(mean_1, 0.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when centering the data \"\n \"and might not be solved. Dataset may \"\n \"contain too large values. You may need \"\n \"to prescale your features.\")\n Xr -= mean_1\n mean_ += mean_1\n\n # scale to unit variance\n Xr /= std_\n\n # If mean_2 is not 'close to zero', it comes from the fact that\n # std_ is very small so that mean_2 = mean_1/std_ > 0, even if\n # mean_1 was close to zero. The problem is thus essentially due\n # to the lack of precision of mean_. A solution is then to\n # substract the mean again.\n mean_2 = Xr.mean(axis=0)\n if not np.allclose(mean_2, 0.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. The standard \"\n \"deviation of the data is probably \"\n \"very close to 0.\")\n Xr -= mean_2\n mean_ += mean_2\n\n # Additional check if variances are 'close to one'\n std_1 = Xr.std(axis=0, ddof=ddof)\n if not np.allclose(std_1, 1.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. Standard deviation \"\n \"not close to one after scaling.\")\n\n return X, mean_, std_", "def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)", "def __init__(self, std: Union[torch.Tensor, float, Distribution], initial_dist: Distribution = None):\n\n if not isinstance(std, torch.Tensor):\n normal = Normal(0., 1.)\n else:\n normal = Normal(0., 1.) 
if std.shape[-1] < 2 else Independent(Normal(torch.zeros_like(std), std), 1)\n\n super().__init__((_f, _g), (std,), initial_dist or normal, normal)", "def model_gauss(xsigma, nx, ny=1, nz=1, ysigma=None, zsigma=None, xcenter=None, ycenter=None, zcenter=None):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\tif( ysigma == None ) : ysigma = xsigma\n\tif( zsigma == None ) : zsigma = xsigma\n\tif( xcenter == None ) : xcenter = nx//2\n\tif( ycenter == None ) : ycenter = ny//2\n\tif( zcenter == None ) : zcenter = nz//2\n\te.process_inplace(\"testimage.puregaussian\", {\"x_sigma\":xsigma,\"y_sigma\":ysigma,\"z_sigma\":zsigma,\"x_center\":xcenter,\"y_center\":ycenter,\"z_center\":zcenter} )\n\treturn e", "def test_predict_uncertain_inputs(self):\n X = np.linspace(-5,5, 10)[:, None]\n Y = 2*X + np.random.randn(*X.shape)*1e-3\n m = GPy.models.BayesianGPLVM(Y, 1, X=X, kernel=GPy.kern.Linear(1), num_inducing=1)\n m.Gaussian_noise[:] = 1e-4\n m.X.mean[:] = X[:]\n m.X.variance[:] = 1e-5\n m.X.fix()\n m.optimize()\n X_pred_mu = np.random.randn(5, 1)\n X_pred_var = np.random.rand(5, 1) + 1e-5\n from GPy.core.parameterization.variational import NormalPosterior\n X_pred = NormalPosterior(X_pred_mu, X_pred_var)\n # mu = \\int f(x)q(x|mu,S) dx = \\int 2x.q(x|mu,S) dx = 2.mu\n # S = \\int (f(x) - m)^2q(x|mu,S) dx = \\int f(x)^2 q(x) dx - mu**2 = 4(mu^2 + S) - (2.mu)^2 = 4S\n Y_mu_true = 2*X_pred_mu\n Y_var_true = 4*X_pred_var\n Y_mu_pred, Y_var_pred = m.predict_noiseless(X_pred)\n np.testing.assert_allclose(Y_mu_true, Y_mu_pred, rtol=1e-3)\n np.testing.assert_allclose(Y_var_true, Y_var_pred, rtol=1e-3)", "def prob(x):\n\treturn 1. * bivariate_normal(x, (0., 1.2), (1., 1.), .8) + \\\n\t 1.05 * bivariate_normal(x, (.6, -1.), (1.3, .7), -.6)", "def normal_upper_bound(probability: float, mu: float = 0, sigma: float = 1) -> float:\n return inverse_normal_cdf(probability, mu, sigma)", "def __call__(self, *args, **kwargs):\n mu, sigma = self.condition(args, **kwargs)\n return tf.contrib.distributions.Normal(loc=mu, scale=sigma)", "def conditionalDistribution(self, d, v):\n probabilities_ts = np.ones((self.n_topic_components, self.n_sentiment_components))\n firstFactor = (self.n_ds[d] + self.alphaVec) / \\\n (self.n_d[d] + np.sum(self.alphaVec))\n secondFactor = np.zeros((self.n_topic_components,self.n_sentiment_components))\n for s in range(self.n_sentiment_components):\n \n secondFactor[:,s] = ((self.n_dst[d, s, :] + self.gammaVec) / \\\n (self.n_ds[d, s] + np.sum(self.gammaVec)))\n\n thirdFactor = (self.n_vts[v,:, :] + self.beta) / \\\n (self.n_ts + self.n_vts.shape[0] * self.beta)\n\n #forthFactor = np.zeros((self.n_topic_components, self.n_sentiment_components))\n #for k in range(self.n_topic_components):\n # forthFactor[k,:] = np.exp(np.dot(self.topic_embeddings[k,:],self.word_embeddings[v,:]))/np.sum(np.exp(np.dot(self.topic_embeddings[k,:],self.word_embeddings.T)))\n \n forthFactor = np.exp(np.dot(self.topic_embeddings,self.word_embeddings[v,:]))/np.sum(np.exp(np.dot(self.topic_embeddings,self.word_embeddings.T)),-1)\n probabilities_ts *= firstFactor[:, np.newaxis]\n #probabilities_ts *= secondFactor * thirdFactor\n probabilities_ts *= secondFactor * ((1-self.lambda_)*thirdFactor + self.lambda_*forthFactor)\n probabilities_ts /= np.sum(probabilities_ts)\n \n return probabilities_ts", "def bias_prior(self):", "def conditionalDistribution(self, d, m, w):\r\n probabilities_ts = np.ones((self.numTopics, self.numSentiments))\r\n\r\n # firstfactor 수정\r\n firstFactor = (self.n_wkl[w, :, :] + self.beta) / \\\r\n 
(self.n_kl + self.n_wkl.shape[0] * self.beta) # dim(K x L)\r\n\r\n secondFactor = (self.ns_dk[d, :] + self.alpha) / \\\r\n (self.ns_d[d] + self.numTopics * self.alpha) # dim(K x 1)\r\n\r\n thirdFactor = (self.ns_dkl[d, :, :] + self.gamma) / \\\r\n (self.ns_dk[d] + self.numSentiments * self.gamma)[:, np.newaxis] # dim (K x L)\r\n\r\n probabilities_ts *= firstFactor * thirdFactor\r\n probabilities_ts *= secondFactor[:, np.newaxis]\r\n probabilities_ts /= np.sum(probabilities_ts)\r\n return probabilities_ts", "def init_vector(self,x,dim):\n if dim == \"noise\":\n self.prior.sqrtM.init_vector(x,1)\n else:\n self.prior.init_vector(x,dim)", "def set_hyper_parameters(self, x):\n self.set_scale(x[0])", "def funcG(p, x):\n A, mu, sigma, zerolev = p\n return( A * numpy.exp(-(x-mu)*(x-mu)/(2*sigma*sigma)) + zerolev )", "def _fspecial_gauss_2d(self, size, sigma):\n gaussian_vec = self._fspecial_gauss_1d(size, sigma)\n return torch.outer(gaussian_vec, gaussian_vec)", "def gaussseidel_poissoneq(A, x0):\n return 1", "def fitSymmetricGaussian2D(data, sigma):\n params = [np.min(data),\n np.max(data),\n 0.5 * data.shape[0],\n 0.5 * data.shape[1],\n 2.0 * sigma]\n return fitAFunctionLS(data, params, symmetricGaussian2D)", "def update2(self, es, **kwargs):\n self._update_ps(es) # caveat: if es.B or es.D are already updated and ps is not, this goes wrong!\n p = self.ps\n try: pc_for_ps = 'pc for ps' in es.opts['vv'] # just in case\n except: pc_for_ps = False # 'vv' has an incompatible format or does't exist\n if pc_for_ps:\n # was: es.D**-1 * np.dot(es.B.T, es.pc)\n if es.opts['verbose'] > 5 and es.countiter == 1:\n utils.print_message('pc for ps is active')\n p = es.sm.transform_inverse(es.pc)\n try: # to filter coordinates or a\n p = es.path_for_sigma_update(p) # subspace depending on the state\n except AttributeError:\n if 11 < 3 and len(es.opts['integer_variables']):\n m = (\"Missing ``path_for_sigma_update`` attribute in {}.\"\n \"\\n This is usually not a problem unless integer mutations are used.\"\n \"\".format(type(es)))\n _warnings.warn(m)\n N = len(p)\n if N == 0: # all variables are masked, do nothing\n return 1\n if es.opts['CSA_squared']:\n s = (sum(_square(p)) / N - 1) / 2\n # sum(self.ps**2) / es.N has mean 1 and std sqrt(2/N) and is skewed\n # divided by 2 to have the derivative d/dx (x**2 / N - 1) for x**2=N equal to 1\n else:\n s = _norm(p) / Mh.chiN(N) - 1\n s *= self.cs / self.damps\n s_clipped = Mh.minmax(s, -self.max_delta_log_sigma, self.max_delta_log_sigma)\n # \"error\" handling\n if s_clipped != s:\n utils.print_warning('sigma change np.exp(' + str(s) + ') = ' + str(np.exp(s)) +\n ' clipped to np.exp(+-' + str(self.max_delta_log_sigma) + ')',\n 'update',\n 'CMAAdaptSigmaCSA',\n es.countiter, es.opts['verbose'])\n self.delta *= np.exp(s_clipped)\n return np.exp(s_clipped)", "def _WeightInit(self, stddev):\n return init_ops.truncated_normal_initializer(stddev=stddev)", "def prior_distribution(self):\n out = self.model.forward(self.inducing_points)\n return MultivariateNormal(out.mean, out.lazy_covariance_matrix.evaluate_kernel())", "def ExponentialPosteriorSample(bs, ls) :\n def exps(args) :\n lamb = args\n eps = K.random_uniform(shape=(bs, ls))\n ans = (-1./lamb) * K.log(-eps + 1)\n return ans\n return exps", "def g1(self, nx, ny, x_des):\n\n # :evaluating unscaled constraints at the initial point\n g_1_0 = self.g1_unscaled(nx, ny, .5 * np.ones(4 * nx + 5 * ny))\n\n # :evaluating unscaled constraints:\n g_1 = self.g1_unscaled(nx, ny, x_des)\n\n # :define the threshold \"tow\" to 
translate the scaled constraint\n # :alpha determines to what extent the inactive constraints are satisfied\n tow, alpha = [], self.alpha_g1\n [tow.append(i) if self.mu_g1[list(g_1_0).index(i)] < self.p else tow.append(alpha + (1 - alpha) * i) for i in g_1_0]\n\n # define the translated constraint\n g_1_translated = []\n [g_1_translated.append(g_1[i] - tow[i]) for i in range(nx)]\n return g_1_translated", "def test_data_is_scaled():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"SGD\")\n assert sum(atom.sgd.predict(X_bin)) > 0 # Always 0 if not scaled", "def sigmaz():\n return _SIGMAZ.copy()", "def run_test(d):\n\n ######### Problem Specification\n\n # Data generation parameters\n prior_mu_z = np.zeros(d, dtype=np.float32) # Prior mean\n prior_sigma_z = np.eye(d, dtype=np.float32) # Prior covariance matrix\n\n # True model parameters\n num_range = np.arange(-(d-1)/2, (d+1)/2, dtype=np.float32)\n\n t_delta = num_range / 5 \n\n if d == 1:\n t_sigma = np.ones(1)\n else: \n # Allow sigma to range from 0.1 to 1\n t_sigma = 36/(10*(d-1)**2) * num_range**2 + 0.1 \n\n ######### Variable Initialization\n\n # Initial model parameters - same across all methods\n init_delta = prior_mu_z.copy()\n init_log_sigma = 3 * np.ones(d)\n\n # Initial HVAE variational parameters\n init_T = 5.\n init_eps = 0.005 * np.ones(d)\n max_eps = params['max_eps'] * np.ones(d)\n init_logit_eps = np.log(init_eps/(max_eps - init_eps))\n init_log_T_0 = np.log(init_T - 1)\n\n # Initial NF variational parameters\n init_u_pre_reparam = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_w = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_b = 0.1\n\n # Initial VAE parameters\n init_mu_z = prior_mu_z.copy()\n init_log_sigma_z = np.ones(d)\n\n ######### Set up models\n\n HVAE_model_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_1', d, params['HVAE_K_1'])\n HVAE_model_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_2', d, params['HVAE_K_2'])\n\n HVAE_model_notemp_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'],\n [init_delta, init_log_sigma, init_logit_eps], \n 'HVAE_notemp_1', d, params['HVAE_K_1'])\n HVAE_model_notemp_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'], \n [init_delta, init_log_sigma, init_logit_eps],\n 'HVAE_notemp_2', d, params['HVAE_K_2'])\n\n NF_model_1 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_1', d, params['NF_K_1'])\n NF_model_2 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_2', d, params['NF_K_2'])\n\n VB_model = VB(['delta', 'log_sigma', 'mu_z', 'log_sigma_z'], \n [init_delta, init_log_sigma, init_mu_z, init_log_sigma_z], 'VB', d)\n\n model_list = [HVAE_model_1, HVAE_model_2, HVAE_model_notemp_1, \n HVAE_model_notemp_2, NF_model_1, NF_model_2, VB_model]\n \n ######### Generate Training Data & Save - One for each test\n\n train_data_list = []\n\n for i in range(params['n_tests']):\n z = np.random.multivariate_normal(prior_mu_z, prior_sigma_z)\n x = np.random.multivariate_normal(z + t_delta, np.diag(t_sigma**2), \n size=params['n_data'])\n train_data_list.append(x)\n\n # Folder should have already been created in the initializations\n data_path = os.path.join('save', str(d), 'train_data.p')\n pickle.dump(train_data_list, open(data_path, 
'wb')) \n\n ######### Train models\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Store the final parameter values for all test runs in this dictionary\n final_params = {}\n\n for m in model_list:\n\n final_values = []\n\n for i in range(params['n_tests']):\n (delta, sigma) = m.train(sess, train_data_list[i], i)\n final_values.append((delta, sigma))\n\n final_params[m.model_name] = final_values.copy()\n\n ######### Test models using difference between parameters\n\n param_diffs = {}\n\n for m in model_list:\n\n diffs = []\n\n for i in range(params['n_tests']):\n delta = final_params[m.model_name][i][0]\n sigma = final_params[m.model_name][i][1]\n\n delta_diff = np.sum((delta - t_delta)**2)\n sigma_diff = np.sum((sigma - t_sigma)**2)\n\n diffs.append((delta_diff, sigma_diff))\n\n param_diffs[m.model_name] = diffs.copy()\n\n # Save parameter differences in a pickle file\n diff_path = os.path.join('save', str(d), 'all_diffs.p')\n pickle.dump(param_diffs, open(diff_path, 'wb'))", "def bivariate_normal(x, mu, sigma, rho):\n\treturn (1. / (2. * math.pi * sigma[0] * sigma[1] * math.sqrt(1. - rho * rho))) * \\\n\t math.exp( (-1. / (2. * (1. - rho * rho))) * \\\n\t ( ( math.pow(x[0] - mu[0], 2) / sigma[0] * sigma[0] ) + \\\n\t ( math.pow(x[1] - mu[1], 2) / sigma[1] * sigma[1] ) - \\\n\t ( 2. * rho * (x[0] - mu[0]) * (x[1] - mu[1]) / (sigma[0] * sigma[1]) ) ) )", "def update(self, es, **kwargs):\n es.sigma = self.coefficient * es.sp.weights.mueff * _norm(es.mean) / es.N / es.sp.cmean", "def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n\r\n # mean of posterior distribution is the MAP estimate of the weights a\r\n # tau^2(from notes) is beta\r\n\r\n extra_col = np.ones((x.shape[0], 1))\r\n x = np.append(extra_col, x, axis = 1)\r\n\r\n alpha_map = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))@(np.transpose(x)@z)\r\n mu = alpha_map\r\n\r\n Cov = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))*sigma2\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu.squeeze(), Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Posterior Distribution of α Given 5 Data Points\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.show()\r\n\r\n return (mu,Cov)", "def lhs_scaled(space: data.Space,\n num_samples: int,\n criterion: Optional[Text] = None,\n random_state: Optional[int] = None):\n lhs = sampler.Lhs(lhs_type=\"classic\", criterion=criterion)\n design = lhs.generate(space.dimensions, num_samples, random_state)\n return design", "def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n\r\n mu = np.zeros(2)\r\n Cov = np.array([[beta, 0], [0, beta]])\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu, Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Prior Distribution of α\")\r\n plt.xlabel('$α_0$')\r\n 
plt.ylabel('$α_1$')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.show()\r\n \r\n return", "def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma", "def __init__(self,\n dtype=tf.float32,\n validate_args=False,\n name='conditional_scale'):\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n super(ConditionalScale, self).__init__(\n forward_min_event_ndims=0,\n is_constant_jacobian=True,\n validate_args=validate_args,\n dtype=dtype,\n parameters=parameters,\n name=name)", "def _base_dist(self, *args, **kwargs):\n return tfd.TransformedDistribution(\n distribution=tfd.Normal(*args, **kwargs),\n bijector=tfp.bijectors.Sigmoid(),\n name=\"LogitNormal\",\n )" ]
[ "0.69035596", "0.60564435", "0.5897708", "0.5843555", "0.5838015", "0.5830661", "0.5816271", "0.5786775", "0.5709402", "0.56590503", "0.56590503", "0.56466454", "0.5611644", "0.5611628", "0.5609158", "0.5600361", "0.55973405", "0.55847114", "0.55843025", "0.5575209", "0.55476475", "0.5539354", "0.5537551", "0.5523137", "0.5516598", "0.55065876", "0.5499561", "0.54955065", "0.54898775", "0.5458864", "0.54113966", "0.5395017", "0.5390151", "0.5386603", "0.53835887", "0.5377864", "0.5376765", "0.536095", "0.5348596", "0.5338788", "0.53293085", "0.5287393", "0.5285963", "0.5280356", "0.527375", "0.52621436", "0.52598846", "0.5259035", "0.5252774", "0.52516776", "0.5251148", "0.5248263", "0.52419555", "0.52401125", "0.52401125", "0.523206", "0.5228793", "0.52131534", "0.5207548", "0.52053064", "0.51977277", "0.51977277", "0.51977277", "0.51977277", "0.5192242", "0.5190909", "0.5189436", "0.5183833", "0.51761806", "0.51747996", "0.51739186", "0.5170167", "0.5163703", "0.5160557", "0.5159454", "0.5158906", "0.51554483", "0.515456", "0.5154036", "0.5140465", "0.5129674", "0.51276827", "0.5126064", "0.51257896", "0.51231736", "0.5122517", "0.51167756", "0.51016784", "0.50979424", "0.5095038", "0.509305", "0.509062", "0.5086353", "0.50848067", "0.50783163", "0.5071324", "0.50713223", "0.5070573", "0.506708", "0.50637686", "0.5060281" ]
0.0
-1
r"""Joint Autoregressive Hierarchical Priors model from D.
def mbt2018(quality, metric="mse", pretrained=False, progress=True, **kwargs):
    if metric not in ("mse", "ms-ssim"):
        raise ValueError(f'Invalid metric "{metric}"')
    if quality < 1 or quality > 8:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
    return _load_model("mbt2018", metric, quality, pretrained, progress, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, D, H, S, to_learn=[\"W\", \"pi\", \"sigma\"], comm=MPI.COMM_WORLD):\n self.comm = comm\n self.noise_policy = {}\n self.to_learn = to_learn\n self.D = D\n self.H = H\n self.S = S\n\n tol = 1e-5\n self.noise_policy = {\n \"W\": (-np.inf, +np.inf, False, None),\n \"pi\": (tol, 1.0 - tol, False, None),\n \"sigma\": (tol, +np.inf, False, None),\n }\n\n # Numerical stabilization for lpj computation\n self.B_max = 0.0\n self.B_max_shft = np.inf", "def rip1_to_parp():\n Monomer('MLKL', ['bRHIM', 'state'], {'state':['unmod', 'active', 'inactive']})\n Parameter('MLKL_0' , 1.0e6) # molecules per cell\n alias_model_components()\n Initial(MLKL(bRHIM = None, state = 'unmod'), MLKL_0) # MLKL\n \n Rule('Rip_PO4lation', RIP1(bRHIM=ANY, state = 'unmod')%RIP3(bRHIM=ANY, state='unmod') >> RIP1(bRHIM=ANY, state = 'po4')%RIP3(bRHIM=ANY, state = 'po4'), Parameter('k19', 1e-1))\n Rule('Rip_PO4lation_alt', RIP1(bRHIM=ANY, state = 'deub')%RIP3(bRHIM=ANY, state='unmod') >> RIP1(bRHIM=ANY, state = 'po4')%RIP3(bRHIM=ANY, state = 'po4'), Parameter('k19a', 1e-1))\n \n catalyze_state(RIP1(state='po4'), 'bPARP', MLKL(), 'bRHIM', 'state', 'unmod', 'active', [1e-6,1e-3, 1e-1])\n catalyze_state(MLKL(state='active'), 'bRHIM', MLKL(), 'bRHIM', 'state', 'unmod', 'active', [1e-7, 0.2, 0.01])", "def generate_homography_nn_sgd(self):\n # Create the NN\n self.set_optimizer_sgd()\n self.set_callback(utils.lr_callback)\n self.build_model()\n self.compile()", "def _ang_part(self, dP):\n import pandas as pd\n dsP = pd.DataFrame(dP, columns=[\"i\", \"Pij\", \"nisi\", \"njsj\", \"l\"])\n dsP[\"Pij\"] = dsP[\"Pij\"].apply(self._renorm_p)\n return dsP.groupby(\"l\").sum()[\"Pij\"].to_dict()", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... 
to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def buildP(self):\r\n\r\n print 'Building P ...'\r\n\r\n #---Building p(y|x)---#\r\n pygx_params_mlp = MLP(activations=self.hyper['pygx_activs'],\r\n dims=self.hyper['pygx_dims'],\r\n weights_init=self.hyper['pygx_W_init'],\r\n biases_init=Constant(0))\r\n\r\n pygx_params = pygx_params_mlp.apply(self.x.reshape((self.x.shape[0]*self.x.shape[1],self.x.shape[2])))\r\n pygx_params = pygx_params.reshape((self.x.shape[0],self.x.shape[1],2*self.hyper['y_dim']))\r\n pygx_params_mlp.initialize()\r\n\r\n # self.pygx_mu.shape == (minibatch size, L_x , num of dimension of y)\r\n self.pygx_mu = pygx_params[:,:,:self.hyper['y_dim']]\r\n\r\n # self.pygx_var.shape == (minibatch size, L_x, num of dimension of y)\r\n self.pygx_var = T.exp( pygx_params[:,:,self.hyper['y_dim']:] )\r\n\r\n\r\n #---Building graph for the density of p(y|x)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.y.dimshuffle(0,'x',1) - self.pygx_mu)**2/(2*self.pygx_var), axis=2)\r\n norm_cst = (2*np.pi)**(-self.hyper['y_dim']/2.)*T.exp(T.sum(T.log(self.pygx_var), axis=2))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n pygx = norm_cst*T.exp(inside_exp)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n self.log_pygx = T.log(pygx + little_num)\r\n\r\n #---Building NN for p(x|z=j,w) for all j---#\r\n pxgzw_mus = [None]*self.hyper['num_clust']\r\n pxgzw_vars = [None]*self.hyper['num_clust']\r\n pxgzw = [None]*self.hyper['num_clust']\r\n\r\n for j in range(self.hyper['num_clust']):\r\n\r\n pxgzw_params_mlp = MLP(activations=self.hyper['pxgzw_activs'][j],\r\n dims=self.hyper['pxgzw_dims'][j],\r\n weights_init=self.hyper['pxgzw_W_init'],\r\n biases_init=Constant(0))\r\n\r\n pxgzw_params = pxgzw_params_mlp.apply(self.w.reshape((self.w.shape[0]*self.w.shape[1],self.w.shape[2])))\r\n pxgzw_params = pxgzw_params.reshape((self.w.shape[0],self.w.shape[1], 2*self.hyper['x_dim']))\r\n pxgzw_params_mlp.initialize()\r\n\r\n # pxgzw_mus[j].shape == (minibatch size, L_w , num of dimension of x)\r\n pxgzw_mus[j] = pxgzw_params[:,:,:self.hyper['x_dim']]\r\n\r\n # pxgzw_vars[j].shape == (minibatch size, L_w, num of dimension of x)\r\n pxgzw_vars[j] = T.exp( pxgzw_params[:,:,self.hyper['x_dim']:] )\r\n\r\n #---Building graph for the density of p(x|z=j,w)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.x.dimshuffle(0,'x',1,2) - pxgzw_mus[j].dimshuffle(0,1,'x',2))**2/(2*pxgzw_vars[j].dimshuffle(0,1,'x',2)), axis=3)\r\n norm_cst = (2*np.pi)**(-self.hyper['x_dim']/2.)*T.exp(T.sum(T.log(pxgzw_vars[j]), axis=2))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of w samples (L_w), # of x samples (L_x))\r\n pxgzw[j] = norm_cst.dimshuffle(0,1,'x')*T.exp(inside_exp)\r\n\r\n\r\n # shape is (minibatch size, L_w , # of clusters , num of dimension of x)\r\n self.pxgzw_mus = T.concatenate([mu.dimshuffle(0,1,'x',2) for mu in pxgzw_mus], axis=2)\r\n # shape is (minibatch size, L_w , # of clusters , num of dimension of x)\r\n self.pxgzw_vars = T.concatenate([var.dimshuffle(0,1,'x',2) for var in pxgzw_vars], axis=2)\r\n\r\n # self.pxgzw.shape == (minibatch size, L_w, L_x, num_clust)\r\n self.pxgzw = T.concatenate([density.dimshuffle(0,1,2,'x') for density in pxgzw], axis=3)\r\n self.log_pxgzw = T.log(self.pxgzw + little_num)\r\n\r\n #---Building the 
p(z=j|x,w) posterior for all j---#\r\n # self.log_pzgxw.shape == (minibatch size, L_w, L_x, num_clust)\r\n self.log_pzgxw = T.log(self.pxgzw + little_num) -T.log(T.sum(self.pxgzw + little_num, axis=3).dimshuffle(0,1,2,'x'))", "def processAsD(self, G, PD, id):\n inNL = PD.lprevUpdate[id][1]\n outNL = PD.lprevUpdate[id][2]\n HD = getDirectedSubgraph( G, inNL, outNL, self.isSimple )\n H = nx.Graph(HD)\n components = nx.connected_component_subgraphs(H, copy=True)\n fcomponents = dict()\n it = 0\n for comp in components:\n if comp.number_of_nodes() > self.minsize:\n C_inNL = list(set(comp.nodes()).intersection(set(inNL)))\n C_outNL = list(set(comp.nodes()).intersection(set(outNL)))\n fcomponents[it] = getDirectedSubgraph(G, C_inNL, C_outNL, self.isSimple)\n\n if len(fcomponents) == 1: # * if valid components is more than 1 than split shall be performed\n curPat = fcomponents[0]\n baseParams = dict()\n baseParams['Pat'] = Pattern(H)\n baseParams['codeLengthC'] = getCodeLengthParallel( H, PD, gtype=self.gtype, case=2, isSimple=self.isSimple, inNL=baseParams['Pat'].inNL, outNL=baseParams['Pat'].outNL )\n baseParams['codeLengthCprime'] = baseParams['codeLengthC']\n baseParams['Pat'].setIC_dssg( baseParams['codeLengthC'] - baseParams['codeLengthCprime'] )\n baseParams['Pat'].setDL( computeDescriptionLength( dlmode=6, C=len(PD.lprevUpdate), gtype=self.gtype, WIS=baseParams['Pat'].InNCount, WOS=baseParams['Pat'].OutNCount, WI=baseParams['Pat'].InNL, WO=baseParams['Pat'].OutNL, kw=baseParams['Pat'].ECount, isSimple=self.isSimple, kws=baseParams['Pat'].kws ))\n baseParams['Pat'].setI( computeInterestingness( baseParams['Pat'].IC_dssg, baseParams['Pat'].DL, mode=self.imode ) )\n\n bestParams = None\n if curPat.number_of_nodes() < baseParams['Pat'].NCount:\n bestParams = dict()\n bestParams['Pat'] = Pattern(curPat)\n bestParams['codeLengthCprime'] = self.computeCodeLengthShrinkD( G, PD, 2, baseParams, bestParams, id )\n bestParams['Pat'].setIC_dssg( baseParams['codeLengthC'] - bestParams['codeLengthCprime'] )\n bestParams['Pat'].setDL( computeDescriptionLength( dlmode=6, C=len(PD.lprevUpdate), gtype=self.gtype, WIS=baseParams['Pat'].InNCount, WOS=baseParams['Pat'].OutNCount, WI=bestParams['Pat'].InNL, WO=bestParams['Pat'].OutNL, kw=bestParams['Pat'].ECount, isSimple=self.isSimple, kws=bestParams['Pat'].kws ) )\n bestParams['Pat'].setI( computeInterestingness( bestParams['Pat'].IC_dssg, bestParams['Pat'].DL, mode=self.imode ) )\n else:\n bestParams = baseParams\n\n # * Now reduce the only component in fcomponents\n FinalParams = self.getReducedSubgraphD(G, PD, baseParams, bestParams, id)\n FinalParams['SPat'] = FinalParams['Pat'].copy()\n FinalParams['Pat'] = baseParams['Pat'].copy()\n if bestParams['Pat'].I > FinalParams['SPat'].I:\n FinalParams['Pat'].setPrevOrder(id)\n FinalParams['Pat'].setPatType('shrink')\n FinalParams['SPat'].setPrevOrder(id)\n FinalParams['SPat'].setPatType('shrink')\n self.Data[id] = FinalParams\n return", "def joint(self):\n return GraphModel(self.factors).joint()", "def __init__(self, h, d_model, dropout=0.1):\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)", "def massage(X, y, S, b, d):\n\n\t# Learn R, a Gaussian NB classifier which will act as a ranker\n\tR = GaussianNB()\n\tprobas = R.fit(np.asarray(X), y).predict_proba(X)\n\n\t# Create a df with training data, 
labels, and desired class probabilities\n\tX['class'] = y\n\tX['prob'] = [record[d] for record in probas]\n\n\t# Promotion candidates sorted by descending probability of having desired class\n\tpr = X[(X[S] == b) & (X['class'] != d)]\n\tpr = pr.sort_values(by = 'prob', ascending = False)\n\n\t# Demotion candidates sorted by ascending probability\n\tdem = X[(X[S] != b) & (X['class'] == d)]\n\tdem = dem.sort_values(by = 'prob', ascending = True)\n\n\t# Non-candidates\n\tnon = X[((X[S] == b) & (X['class'] == d)) | ((X[S] != b) & (X['class'] != d))]\n\n\t# Calculate the discrimination in the dataset\n\tdisc = discKC(X, y, S, b, d)\n\n\t# Calculate M, the number of labels which need to be modified\n\tM = (disc * len(X[X[S] == b]) * len(X[X[S] != b])) / float(len(X))\n\tM = int(M)\n\n\t# Flip the class label of the top M objects of each group\n\t# i.e. M pairs swap labels, where M is chosen to make discKC = 0\n\tc = pr.columns.get_loc(\"class\")\n\tpr.iloc[:M, c] = d\n\tdem.iloc[:M, c] = 1 - d\n\n\tX.drop(['class', 'prob'], axis = 1, inplace = True)\n\tX_prime = pd.concat([pr, dem, non]) \n\ty_prime = X_prime['class'].tolist()\n\tX_prime = X_prime.drop(['class', 'prob'], axis = 1)\n\n\treturn(X_prime, y_prime)", "def __prepare_dh_params(self):\n self.alpha = symbols('alpha0:' + str(self.joint_count))\n self.a = symbols('a0:' + str(self.joint_count))\n self.q = symbols('q1:' + str(self.joint_count + 1))\n self.d = symbols('d1:' + str(self.joint_count + 1))", "def enumerate_joint_ask(X, e, P):\n Q = ProbDist(X) ## A probability distribution for X, initially empty\n Y = [v for v in P.variables if v != X and v not in e]\n for xi in P.values(X):\n Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)\n return Q.normalize()", "def update(self, obs):\n #######################################\n # Step 1 - prediction for birth targets\n born = [deepcopy(comp) for comp in self.birthgmm]\n # The original paper would do a spawning iteration as part of Step 1.\n spawned = [] # not implemented\n\n #######################################\n # Step 2 - prediction for existing targets\n updated = [GmphdComponent(self.survival * comp.weight, dot(self.f, comp.loc),\n self.q + dot(dot(self.f, comp.cov), self.f.T), comp.id)\n for comp in self.gmm]\n\n predicted = born + spawned + updated\n\n #######################################\n # Step 3 - construction of PHD update components\n # These two are the mean and covariance of the expected observation\n nu = [dot(self.h, comp.loc) for comp in predicted]\n s = [self.r + dot(dot(self.h, comp.cov), self.h.T) for comp in predicted]\n # Not sure about any physical interpretation of these two...\n k = [dot(dot(comp.cov, self.h.T), linalg.inv(s[index]))\n for index, comp in enumerate(predicted)]\n pkk = [dot(eye(len(k[index])) - dot(k[index], self.h), comp.cov)\n for index, comp in enumerate(predicted)]\n\n #######################################\n # Step 4 - update using observations\n # The 'predicted' components are kept, with a decay\n newgmm = [GmphdComponent(comp.weight * (1.0 - self.detection), comp.loc, comp.cov, comp.id)\n for comp in predicted]\n\n # then more components are added caused by each obsn's interaction with existing component\n for anobs in obs:\n anobs = array(anobs)\n newgmmpartial = []\n for j, comp in enumerate(predicted):\n newgmmpartial.append(GmphdComponent(\n self.detection * comp.weight * dmvnorm(nu[j], s[j], anobs),\n comp.loc + dot(k[j], anobs - nu[j]), pkk[j]))\n\n # The Kappa thing (clutter and reweight)\n weightsum = simplesum(newcomp.weight for 
newcomp in newgmmpartial)\n reweighter = 1.0 / (self.clutter + weightsum)\n for newcomp in newgmmpartial:\n newcomp.weight *= reweighter\n\n newgmm.extend(newgmmpartial)\n\n self.gmm = newgmm", "def pdb2pka_sugelm(self):\n import Protool\n P=Protool.structureIO()\n P.readpdb(self.pdbfile)\n P.RemoveALT()\n #import Protool.mutate\n #MUT=Protool.mutate.Mutate(P)\n #\n # Construct arrays\n #\n import pKD_dict\n self.data=pKD_dict.pKD_dict()\n self.atom_data=pKD_dict.pKD_dict()\n #\n # Create dir for mutant PDB files\n #\n import os\n mutdir=os.path.join(self.topdir,self.pdbfile+'.pdbs')\n if not os.path.isdir(mutdir):\n os.mkdir(mutdir)\n #\n # Loop over all residues\n #\n residues=P.residues.keys()\n residues.sort()\n for residue in residues:\n orgres=P.resname(residue)\n print 'Calculating for %s %s' %(residue,P.resname(residue))\n #\n # If neutral mutate to Asp, Glu, Lys, Arg, His\n #\n targets=[]\n for res in ['ARG','LYS','HIS','ASP','GLU']:\n if P.resname(residue)!=res:\n targets.append(res)\n #if orgres=='GLU':\n # targets.append('GLN')\n #elif orgres=='ASP':\n # targets.append('ASN')\n #elif orgres=='HIS':\n # targets.append('PHE')\n #elif orgres=='ARG' or P.resname(residue)=='LYS':\n # targets.append('MET')\n #\n # Target identified. Now model each\n #\n for target in targets:\n import pKD_tools\n resid=pKD_tools.get_resid_from_res(residue)\n orgres=P.resname(residue)\n filename=os.path.join(mutdir,'%s:%s:%s.pdb' %(residue,orgres,target))\n mutation='%s:%s:%s' %(residue,orgres,target)\n if not os.path.isfile(filename):\n import Design_pKa_help\n Design_pKa_help.make_mutation(self.pdbfile,mutation)\n NP=Protool.structureIO()\n NP.readpdb(filename)\n NP.writepdb(filename,TER=None)\n #\n # Calculate the interaction energies\n #\n protein,routines,forcefield,apbs_setup,lig_titgrps = pdb2pka.pre_init(pdbfilename=filename,\n ff='parse',\n ligand=None,\n verbose=1)\n mypkaRoutines = pdb2pka.pKaRoutines(protein, routines, forcefield,apbs_setup)\n #\n # Find our group\n #\n sp=residue.split(':')\n chainid=sp[0]\n resnum=int(sp[1])\n mypkaRoutines.findTitratableGroups()\n this_pKa=None\n for pKa in mypkaRoutines.pKas:\n print pKa.residue.resSeq,resnum\n print pKa.residue.chainID,chainid\n print pKa.residue.name,target\n print pKa.pKaGroup.name,target\n print '--------------'\n print 'ChainID',pKa.residue.chainID\n if pKa.residue.resSeq==resnum and pKa.residue.chainID==chainid and pKa.residue.name==target and pKa.pKaGroup.name==target:\n #print 'Found group',pKa.residue.resSeq,pKa.pKaGroup.name\n this_pKa=pKa\n break\n if not this_pKa:\n raise Exception,'Could not find inserted titratable group'\n mypkaRoutines.get_interaction_energies_setup(this_pKa,mode='pKD')\n matrix=mypkaRoutines.matrix\n #\n # Dig the interaction energies out of the pdb2pka array\n #\n for titration1 in matrix[this_pKa].keys():\n for state1 in matrix[this_pKa][titration1].keys():\n grp_sub=matrix[this_pKa][titration1][state1]\n if mypkaRoutines.is_charged(this_pKa,titration1,state1):\n for pKa2 in grp_sub.keys(): \n import string\n chainID2=pKa.residue.chainID\n resid2='%s:%s' %(chainID2,string.zfill(pKa2.residue.resSeq,4))\n for titration2 in grp_sub[pKa2].keys():\n for state2 in grp_sub[pKa2][titration2].keys():\n if mypkaRoutines.is_charged(pKa2,titration2,state2):\n #\n # Both states are charged, so now we can pull the\n # interaction energies out\n #\n if not self.data.has_key(mutation):\n self.data[mutation]={}\n self.data[mutation][resid2]=grp_sub[pKa2][titration2][state2]\n #\n # Get the potentials at all atoms 
too\n #\n all_pots=mypkaRoutines.all_potentials[this_pKa][titration1][state1]\n sub_all_pots=all_pots[pKa2][titration2][state2]\n for atom in sub_all_pots.keys():\n resid=mutation\n import pKD_tools\n resid2=pKD_tools.get_resid_from_res(atom)\n atomname=atom.split(':')[-1] #atom.name\n if atomname[0]=='H' or atomname in ['N','C','O']:\n continue # Skip all H atoms and all non-CA backbone atoms to save memory\n if not self.atom_data.has_key(resid):\n self.atom_data[resid]={}\n if not self.atom_data[resid].has_key(resid2):\n self.atom_data[resid][resid2]={}\n self.atom_data[resid][resid2][atomname]=abs(sub_all_pots[atom])\n return self.data,self.atom_data", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def rebuildDagPose():\n\n dagPoses = set()\n connectedSkinClusters = set()\n selection = pmc.selected()\n joints = pmc.listRelatives(selection[0], path=True, allDescendents=True, type='joint')\n joints.insert(0, selection[0])\n\n for jnt in joints:\n dagPoses.update(jnt.listConnections(type='dagPose'))\n\n for dag in dagPoses:\n connectedSkinClusters.update(dag.listConnections(type='skinCluster'))\n\n pmc.delete(dagPoses) \n pmc.select(joints, replace=True) \n newDagPose = pmc.dagPose(save=True, selection=True, bindPose=True)\n\n print 'New dagPose, {0}, created'.format(newDagPose.shortName())\n\n for sc in connectedSkinClusters:\n print 'Connecting {0}.message to {1}.bindPose'.format(newDagPose.shortName(), sc.shortName())\n newDagPose.message.connect(sc.bindPose)", "def adjoint(self): # pragma: no cover\r\n raise NotImplementedError()", "def test_fk():\n\n joints = [0.0, 2.9, 1.3, 4.2, 1.4, 0.0]\n\n path_planner = PathPlanner(\"manipulator\")\n\n pose = path_planner.get_fk(joints)\n\n print pose", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n for cls in self.classes:\n class_probability = self.prior_prob[cls]\n for key, value in datum.items():\n relative_feature_values = self.likelihoods[cls][key]\n class_probability += math.log(relative_feature_values.get(datum[key], 0.01))\n\n logJoint[cls] = class_probability\n\n return logJoint", "def P_prior(self):\n return dot(self.U_prior, dot(diag(self.D_prior), self.U_prior.T))", "def computePPFwithAlpha(self, ddist = 5.0, dangle = 2*math.pi/12.0):\n\n # global model descriptor, gmd\n gmd = {}\n\n ntemppoint = self.temppnts.shape[0]\n for i in range(ntemppoint):\n print i, ntemppoint\n for j in range(ntemppoint):\n # for i in range(0,1):\n # for j in range(3,4):\n m_0 = np.asarray(self.temppnts[i])\n m_1 = np.asarray(self.temppnts[j])\n v_m0m1 = m_0-m_1\n v_m1m0 = m_1-m_0\n n_m0 = self.tempnormals[i]\n n_m1 = self.tempnormals[j]\n # f1, namely ||d||2\n f1 = np.linalg.norm(m_0-m_1)\n # f2, namely angle between n_m0 and v_m1m0\n f2 = rm.radian_between(n_m0, v_m1m0)\n # f3, namely angle 
between n_m1 and v_m0m1\n f3 = rm.radian_between(n_m1, v_m0m1)\n # f4, namely angle between n_m0 and n_m1\n f4 = rm.radian_between(n_m0, n_m1)\n # discretize the values\n f1d = math.floor(f1/ddist)*ddist+ddist\n f2d = math.floor(f2/dangle)*dangle+dangle\n f3d = math.floor(f3/dangle)*dangle+dangle\n f4d = math.floor(f4/dangle)*dangle+dangle\n key = (f1d, f2d, f3d, f4d)\n # angle between n_m0 and x+\n xplus = np.asarray([1,0,0])\n yplus = np.asarray([0,1,0])\n nm0xangle = math.degrees(rm.radian_between(n_m0, xplus))\n rotax = np.cross(xplus, n_m0)\n if np.isnan(rotax).any() or not rotax.any():\n continue\n rotmat = rm.rodrigues(rotax, nm0xangle)\n v_m1m0onxplus = np.dot(v_m1m0, rotmat)\n v_m1m0onxplusyzproj = np.asarray([0, v_m1m0onxplus[1], v_m1m0onxplus[2]])\n alpha_m0 = rm.radian_between(v_m1m0onxplusyzproj, yplus)\n if v_m1m0onxplus[2] < 0:\n alpha_m0 = 2*math.pi - alpha_m0\n # debug\n # before transform\n pg.plotArrow(base.render, spos = m_0, epos = m_1, rgba=Vec4(0,1,0,1))\n pg.plotArrow(base.render, spos = m_0, epos = m_0+n_m0, rgba = Vec4(1,0,0,1))\n # after transform\n # print v_m1m0onxplus\n # print v_m1m0onxplusyzproj\n pg.plotArrow(base.render, spos = m_0, epos = v_m1m0onxplus+m_0, rgba=Vec4(0,.7,.7,1))\n pg.plotArrow(base.render, spos = m_0, epos = v_m1m0onxplusyzproj+m_0, rgba=Vec4(.70,.7,.7,1))\n pg.plotArrow(base.render, spos = m_0, epos = m_0+xplus, rgba = Vec4(.7,0,.7,1))\n # alpha_m0\n # print np.degrees(alpha_m0)\n # plot aixs\n zplus = np.asarray([0,0,1])\n pg.plotArrow(base.render, spos = m_0, epos = m_0+xplus*10, rgba = Vec4(.3,0,0,.3))\n pg.plotArrow(base.render, spos = m_0, epos = m_0+yplus*10, rgba = Vec4(0,.3,0,.3))\n pg.plotArrow(base.render, spos = m_0, epos = m_0+zplus*10, rgba = Vec4(0,0,.3,.3))\n\n if key in gmd.keys():\n gmd[key].append([m_0, m_1, alpha_m0])\n else:\n gmd[key] = [[m_0, m_1, alpha_m0]]", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def IK_geometric(dh_params, pose):\n pass", "def setup_nonlinear_model_p(M, C, K, sx='x,y,th', \n rho=1.225, chord=0.2, polarFilename='tjaere11_ds.csv', drag=False, # Aero options\n y_AQ=0, y_AT=None, x_AQ=0, x_AT=0,\n ds='oye', tau=0.08, # tau: used for Oye, but should be taken from Polar!\n di=None):\n # \n p = defaultParams(chord=chord, rho=rho, sx=sx, ds=ds, di=di,\n M=M, C=C, K=K)\n\n # --- Aerodynamic parameters\n if y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = y_AQ\n if y_AT is None:\n p['y_AT'] = y_AQ+chord/2 # default is approximatively half a chord behind\n else:\n p['y_AT'] = y_AT\n p['x_AQ'] = x_AQ\n p['x_AT'] = x_AT\n\n # Read polar\n pol = Polar(polarFilename, fformat=None, radians=True, compute_params=True) # compute_params for DS\n ppol = polarParams(pol, chord=p['chord'], tau=tau)\n p.update(ppol)\n\n# # --- Dictionary\n# p.update({'linModel':False, 'drag':drag})\n return p", "def get_likelihood(self, d):\n pos = d.pos - self.parent.pos\n pos = np.dot(rotmat(-self.angle), pos)\n lik = halfnorm.pdf(pos[0],scale=self.length) * \\\n 
vonmises.pdf(np.arctan2(pos[1],pos[0]),self.vonmisesscale,loc=self.angle)\n #assert lik!=0.0\n return lik", "def __init__(self):\n # Lynx Dimensions in mm\n self.L1 = 76.2 # distance between joint 0 and joint 1\n self.L2 = 146.05 # distance between joint 1 and joint 2\n self.L3 = 187.325 # distance between joint 2 and joint 3\n self.L4 = 34 # distance between joint 3 and joint 4\n self.L5 = 34 # distance between joint 4 and center of gripper\n\n # Joint limits\n self.lowerLim = np.array([-1.4, -1.2, -1.8, -1.9, -2.0, -15]).reshape((1, 6)) # Lower joint limits in radians (grip in mm (negative closes more firmly))\n self.upperLim = np.array([1.4, 1.4, 1.7, 1.7, 1.5, 30]).reshape((1, 6)) # Upper joint limits in radians (grip in mm)", "def greedy_prob( eLoss_kN, ensemble_d ):\n \n N = eLoss_kN.shape[1]\n\n prob_N = np.zeros(N)\n for i in range(N):\n \n new_ensemble_D = np.append(ensemble_d, i ) # add model i to the ensemble\n prob_D = agnostic_bayes_prob(eLoss_kN[:,new_ensemble_D] ) \n prob_N[i] = prob_D[-1] # collect the probability of the recently added model\n \n return prob_N", "def jointUncertaintyMilp(mdp, oldPi, oldZC, unknownFeatStates, costOfQuery):\n m = Model()\n m.setParam('OutputFlag', False)\n\n # convert notation to previous implementation\n S = mdp.S\n A = mdp.A\n R = mdp.rFuncs\n psi = mdp.psi\n T = mdp.T\n alpha = mdp.alpha\n gamma = mdp.gamma\n\n # useful constants\n rLen = len(R)\n M = 10000 # a large number\n Sr = range(len(S))\n Ar = range(len(A))\n\n # decision variables\n x = m.addVars(len(S), len(A), lb=0, name='x')\n y = m.addVars(rLen, name='y')\n # y prime, a helper variable\n y0 = m.addVars(rLen, name='y0', lb=0)\n\n # oldPi is a mapping from state, action (in S x A) to occupancy\n # to be consistent with x, convert it to a mapping from (s, a) where s in Sr, a in Ar\n oldX = {(s, a): oldPi[S[s], A[a]] for s in Sr for a in Ar}\n\n # integer variables\n zR = m.addVars(rLen, vtype=GRB.BINARY, name='zR')\n zC = m.addVars(len(unknownFeatStates), vtype=GRB.BINARY, name='zC')\n # zCNew indicates the newly changed features by x. 
note that it does not need to be constrained as integers\n zCNew = m.addVars(len(unknownFeatStates), lb=0, name='zCNew')\n\n zSafe = m.addVar(vtype=GRB.BINARY, name='zSafe')\n\n V = lambda x_local, r: sum([x_local[s, a] * r(S[s], A[a]) for s in Sr for a in Ar])\n\n # (a) flow conservation constraint\n for sp in Sr:\n m.addConstr(sum(x[s, a] * ((s == sp) - gamma * T(S[s], A[a], S[sp])) for s in Sr for a in Ar) == alpha(S[sp]))\n\n # (b) is encoded in the transition function\n\n for consIdx in range(len(unknownFeatStates)):\n # (c) unknown features can be changed\n m.addConstr(M * zC[consIdx] >= sum(x[S.index(s), A.index(a)] for s in unknownFeatStates[consIdx] for a in A))\n # (d) constrain z^{new}_\\phi, note that lb of zCNew is 0\n m.addConstr(zCNew[consIdx] >= zC[consIdx] - oldZC[consIdx])\n\n # (e) constraints on y^0_r\n m.addConstr(sum(zC[idx] for idx in range(len(oldZC)) if oldZC[idx] == 1) <= sum(oldZC) - 1 + zSafe * M)\n for i in range(rLen):\n m.addConstr(y0[i] >= V(oldX, R[i]) - (1 - zSafe) * M)\n\n # (f) constraints on y_r\n for i in range(rLen):\n m.addConstr(y[i] <= V(x, R[i]) - y0[i] + (1 - zR[i]) * M)\n m.addConstr(y[i] <= 0 + zR[i] * M)\n\n # obj\n m.setObjective(sum([psi[i] * y[i] for i in xrange(rLen)])\n - sum(zC[idx] * costOfQuery for idx in range(len(unknownFeatStates))),\n GRB.MAXIMIZE)\n\n m.optimize()\n\n pi = {(S[s], A[a]): x[s, a].X for s in Sr for a in Ar}\n\n if config.VERBOSE:\n # print decision variables other than pi for debugging\n print 'oldZC', oldZC\n print 'zC', [zC[consIdx].X for consIdx in range(len(unknownFeatStates))]\n print 'y0 values', [y0[rIdx].X for rIdx in range(rLen)]\n print 'y values', [y[rIdx].X for rIdx in range(rLen)]\n\n if m.status == GRB.Status.OPTIMAL:\n # return feasible being true and the obj value, opt pi\n # .X attribute is to retrieve the value of the variable\n return pi\n else:\n # simply return infeasible\n raise Exception('milp problem optimal solution not found' + m.status)", "def adaSynAdd(self, data, labels):\n r = {}\n g = {}\n rnorm = {}\n rsum = 0\n self.fit(data, labels)\n self.densityclf = neighbors.KNeighborsClassifier(n_neighbors=self.k) \n self.densityclf.fit(data, labels)\n \n #Note that this is an alternative approach for extracting the minority examples\n #in the *same* order as described in smoteTransform.fit()\n for index in xrange(0, len(data)):\n if labels[index] == abs(1 - self.minorityLabel):\n continue\n \n nrpoints = self.densityclf.kneighbors(data[index,:], return_distance=False)\n nrpoints = numpy.setdiff1d(nrpoints, [index])\n if self.minorityLabel == 1:\n num_majority = self.k - numpy.count_nonzero(labels[nrpoints])\n else:\n num_majority = numpy.count_nonzero(data[nrpoints])\n \n r[index] = float(num_majority) / float(self.k)\n assert(r[index] >= 0)\n \n \n for k, v in r.viewitems(): \n #print(k,v)\n rsum += v\n for k, v in r.viewitems():\n rnorm[k] = r[k] / rsum\n \n rnormsum = 0\n for k, v in rnorm.viewitems(): rnormsum += v\n #print(rnormsum)\n \n #m = mj + ml, -> if mj = m - ml, mj - ml = m - 2(ml)\n #where len(data) = m and len(r) = mj\n \n #Number of synthetic samples to generate\n G = float(len(data) - len(r) - len(r)) * float(self.beta)\n index = 0\n numNewPoints = 0\n #Convert normalised density distribution values to the number of values\n #to generate for each minority sample.\n for k, v in rnorm.viewitems():\n g[index] = int(round(rnorm[k] * G))\n numNewPoints += g[index]\n index += 1\n \n #print(numNewPoints)\n #print(self.minorityData)\n #Use this information to the smoteTransform 
transfer function.\n #for k, v in g.viewitems(): print(k,v)\n #len(g)\n #len(data[labels == 1])\n assert len(g) == len(data[labels == 1]), \"length of g ({0}) is different from num_minority ({1})\".format(len(g), len(data[labels == 1]))\n return self.transform(numRepeatArray = g)", "def prior_predictive(self):\n cfg = self.config\n n = cfg['batch_size'] * cfg['q/n_samples']\n n_samples = cfg['q/n_samples']\n with util.get_or_create_scope('model', reuse=True):\n h_prior = tf.cast(self.p_h_L.sample(n), cfg['dtype'])\n h_prior = tf.reshape(\n h_prior, [cfg['q/n_samples'], cfg['batch_size'], -1])\n h = [None] * cfg['p/n_layers']\n h[cfg['p/n_layers'] - 1] = h_prior\n for n in range(cfg['p/n_layers'] - 1, 0, -1):\n p_h_n = self.build_stochastic_layer(n, h_above=h[n])\n h[n - 1] = tf.cast(p_h_n.sample(), cfg['dtype'])\n return self.likelihood(h[0])", "def _para_boosting(self, H):\n # print '----------------primal-dual boost-------------------'\n H = np.hstack((H, -H))\n # H_ft = np.asfortranarray((H.copy()))\n (n, p) = H.shape\n self.c = np.log(n*p)\n nu = int(n * self.ratio)\n\n if self.max_iter < 50:\n delta = 1\n else:\n delta = 40\n d = np.ones(n) / n\n d_bar = np.ones(n) / n\n a_bar = np.ones(p) / p\n a = np.ones(p) / p\n h_a = np.sum(H, axis=1) / p\n h_a_bar = h_a.copy()\n # a_bar = a\n # a_tilde = np.ones(p) / p\n h_a_tilde = h_a.copy()\n # d_tilde = np.zeros(p)\n theta = 1\n sig = 1\n tau = 1\n t = 0\n logscale = 0\n for t in range(self.max_iter):\n d = prox_mapping(h_a_tilde, d, tau, 2)\n if self.has_dcap:\n d2 = proj_cap_ent(d, 1.0 / nu)\n # d_new = d_new/d_new.sum()\n if np.abs(d.sum() - d2.sum()) > 0.0001:\n print 'error'\n d = d2\n d_tilde = d\n dtH = np.dot(d_tilde, H)\n # dtH = np.dot(H.T, d_tilde)\n a_new = prox_mapping(-dtH, a, sig, 2)\n h_a_new = np.dot(H, a_new)\n # a_tilde = a_new + theta * (a_new - a)\n h_a_tilde = (1+theta) * h_a_new - theta * h_a\n a = a_new\n h_a = h_a_new\n d_bar *= t / (t + 1.0)\n d_bar += 1.0 / (t + 1) * d\n a_bar *= t / (t + 1.0)\n a_bar += 1.0 / (t + 1) * a\n # h_a_bar = np.dot(H, a_bar)\n h_a_bar = t / (t + 1.0) * h_a_bar + 1.0/(t+1) * h_a\n if int(np.log(t+1)) == logscale:\n logscale += 1\n self.iter_num.append(t)\n if self.has_dcap:\n min_margin = ksmallest2(h_a_bar, nu)\n self.primal_obj.append(-np.mean(min_margin))\n else:\n self.primal_obj.append(- np.min(h_a_bar))\n self.margin.append(-self.primal_obj[-1])\n self.dual_obj.append(-np.max(np.dot(d_bar, H)))\n self.gap.append(self.primal_obj[-1] - self.dual_obj[-1])\n self.err_tr.append(np.mean(h_a_bar < 0))\n # if t % 100 == 0:\n # print 'iter ' + str(t) + ' ' + str(self.gap[-1])\n if self.gap[-1] < self.epsi:\n break\n self.alpha = a_bar[:p / 2] - a_bar[p / 2:]\n self.d = d_bar\n print \" pd-boosting(python): max iter#%d: , actual iter#%d\" % (self.max_iter, t)", "def __init__(self, model, line, segments = None,head_target = 0,\r\n variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n # Append this element to the specified model\r\n self.model = model\r\n model.elementlist.append(self)\r\n model.linear_solver = True\r\n\r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into segments pieces\r\n \r\n # Complexify the line, if it wasn't already complex\r\n line = self.complexify(line)\r\n \r\n # The subdivision algorith requires the line coordinates as a real N-by-2 matrix\r\n line = np.column_stack((\r\n np.real(line)[:,np.newaxis],\r\n 
np.imag(line)[:,np.newaxis]))\r\n \r\n self.line_raw = copy.copy(line)\r\n if segments is None:\r\n self.segments = line.shape[0]-1\r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n raise Exception('Prescribed number of line segments '+str(self.segments)+\" mustn't be smaller than base number of segments \"+str(line.shape[0]-1)+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]-1:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # ---------------------------------------------------------------------\r\n \r\n # Get strength parameters for each vertex\r\n self.strength = np.ones(self.segments)\r\n \r\n \r\n self.zc = []\r\n self.segment_nvec = []\r\n self.L = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n self.zc = np.asarray(self.zc)\r\n \r\n # Extract target variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n self.L = np.asarray(self.L)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. 
Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)", "def buildObjective(self):\r\n\r\n # self.z_prior might be the modified version\r\n self.L_elbo = T.mean(self.reconst + self.conditional_prior + self.w_prior + self.z_prior)\r\n\r\n self.L_elbo_modif = T.mean(self.reconst + self.conditional_prior + self.w_prior_modif + self.z_prior_modif)\r\n\r\n #---Getting model parameter---#\r\n cg = ComputationGraph(self.L_elbo)\r\n #self.phi_theta is the list of all the parameters in q and p.\r\n self.params = VariableFilter(roles=[PARAMETER])(cg.variables)", "def ex_4pdeer(param): \r\n param = _parsargs(param,npar=1) \r\n \r\n # Dipolar pathways\r\n lam = param[0]\r\n pathways = [\r\n [1-lam],\r\n [lam, 0]\r\n ]\r\n return pathways", "def main(self):\n\n self.nodelist = []\n\n self.probname = self.probpath.split('/')[-1].rstrip('.mps.lp.gz')\n\n model = Model(\"TreeD\")\n eventhdlr = LPstatEventhdlr()\n eventhdlr.nodelist = self.nodelist\n model.includeEventhdlr(eventhdlr, \"LPstat\", \"generate LP statistics after every LP event\")\n model.readProblem(self.probpath)\n model.setIntParam('presolving/maxrestarts', 0)\n\n for setting in self.scip_settings:\n model.setParam(setting[0], setting[1])\n\n model.optimize()\n\n self.scipversion = 'SCIP '+str(model.version())\n # self.scipversion = self.scipversion[:-1]+'.'+self.scipversion[-1]\n\n if model.getStatus() == 'optimal':\n self.optval = model.getObjVal()\n else:\n self.optval = None\n\n\n # print(\"performing Spatial Analysis on similarity of LP condition numbers\")\n # self.performSpatialAnalysis()\n\n columns = self.nodelist[0].keys()\n self.df = pd.DataFrame(self.nodelist, columns = columns)\n\n # merge solutions from cutting rounds into one node\n if not self.showcuts:\n self.df = self.df[self.df['first'] == False].drop_duplicates(subset='age', keep='last').reset_index()", "def __init__(self, h, d_model, leaky_relu_slope=0.1, dropout=0.1, attenuation_lambda=0.1, distance_matrix_kernel='softmax'):\n super(MultiHeadedAttention, self).__init__()\n assert d_model 
% h == 0\n self.d_k = d_model // h # We assume d_v always equals d_k\n self.h = h\n\n self.attenuation_lambda = torch.nn.Parameter(torch.tensor(attenuation_lambda, requires_grad=True))\n\n self.linears = clones(nn.Linear(d_model, d_model), 5) # 5 for query, key, value, node update, edge update\n\n self.message = None\n self.leaky_relu_slope = leaky_relu_slope\n self.dropout = nn.Dropout(p=dropout)\n\n if distance_matrix_kernel == 'softmax':\n self.distance_matrix_kernel = lambda x: F.softmax(-x, dim=-1)\n elif distance_matrix_kernel == 'exp':\n self.distance_matrix_kernel = lambda x: torch.exp(-x)", "def to_revolute_tree(self):\n T_zero = {\"p0\": SE3.identity()}\n stack = [\"p0\"]\n tree_structure = {\"p0\": []}\n ang_lims_map = {}\n old_to_new_names = {\n \"p0\": \"p0\"\n } # Returned for user of the method (to map old joint names to new ones)\n ub, lb = spherical_angle_bounds_to_revolute(self.ub, self.lb)\n count = 1\n while len(stack) > 0:\n joint = stack.pop(0)\n new_joint = old_to_new_names[joint]\n for child in self.parents[joint]:\n stack += [child]\n new_child = \"p\" + str(count)\n count += 1\n # ub[new_child] = self.ub[child]\n # lb[new_child] = self.lb[child]\n ang_lims_map[child] = new_child\n tree_structure[new_joint] += [new_child]\n new_grand_child = \"p\" + str(count)\n count += 1\n old_to_new_names[child] = new_grand_child\n tree_structure[new_child] = [new_grand_child]\n Ry = SE3(SO3(roty(np.pi / 2)), np.zeros(3))\n T_zero[new_child] = T_zero[new_joint].dot(Ry)\n d = self.d[child]\n Ry_back = SE3(SO3(roty(-np.pi / 2)), np.zeros(3))\n T_zero[new_grand_child] = (\n T_zero[new_child].dot(Ry_back).dot(trans_axis(d, \"z\"))\n )\n tree_structure[new_grand_child] = []\n\n # for key in old_to_new_names:\n # if key in self.ub.keys():\n # ub[old_to_new_names[key]] = self.ub[key]\n # lb[old_to_new_names[key]] = self.lb[key]\n\n # for key in T_zero:\n # if key not in ub.keys() and key is not 'p0':\n # ub[key] = np.pi\n # lb[key] = -np.pi\n\n params = {\"T_zero\": T_zero, \"ub\": ub, \"lb\": lb, \"parents\": tree_structure}\n\n # print(\"normal ub: {:}\".format(self.ub))\n # print(\"ub: {:}\".format(ub))\n # print(\"lb: {:}\".format(lb))\n return RobotRevolute(params), old_to_new_names, ang_lims_map", "def build_dcel(self):\r\n\r\n # Step 1: vertex list creation\r\n for v in self.vl:\r\n self.vertices.append(Vertex(v[0], v[1]))\r\n\r\n # Step 2: hedge list creation. 
Assignment of twins and\r\n # vertices\r\n\r\n for e in self.el:\r\n if e[0] >= 0 and e[1] >= 0:\r\n h1 = Hedge(self.vertices[e[0]],\r\n self.vertices[e[1]])\r\n h2 = Hedge(self.vertices[e[1]], self.vertices[e[0]])\r\n h1.twin = h2\r\n h2.twin = h1\r\n self.vertices[e[1]].hedgelist.append(h1)\r\n self.vertices[e[0]].hedgelist.append(h2)\r\n self.hedges.append(h2)\r\n self.hedges.append(h1)\r\n else:\r\n print(\"oh shit boi wadup\")\r\n\r\n # Step 3: Identification of next and prev hedges\r\n for index, v in enumerate(self.vertices):\r\n v.sort_incident()\r\n l = len(v.hedgelist)\r\n if l < 2:\r\n raise DcelError(\"Badly formed dcel: less than two hedges in vertex:\" + str(index))\r\n else:\r\n for i in range(l - 1):\r\n v.hedgelist[i].nexthedge = v.hedgelist[i + 1].twin\r\n v.hedgelist[i + 1].prevhedge = v.hedgelist[i]\r\n v.hedgelist[l - 1].nexthedge = v.hedgelist[0].twin\r\n v.hedgelist[0].prevhedge = v.hedgelist[l - 1]\r\n\r\n # Step 4: Face assignment\r\n provlist = self.hedges[:]\r\n nf = 0\r\n nh = len(self.hedges)\r\n\r\n while nh > 0:\r\n h = provlist.pop()\r\n nh -= 1\r\n # We check if the hedge already points to a face\r\n if h.face == None:\r\n f = Face()\r\n nf += 1\r\n # We link the hedge to the new face\r\n f.wedge = h\r\n f.wedge.face = f\r\n # And we traverse the boundary of the new face\r\n while not h.nexthedge is f.wedge:\r\n h = h.nexthedge\r\n h.face = f\r\n self.faces.append(f)\r\n # And finally we have to determine the external face\r\n for f in self.faces:\r\n f.external = f.area() < 0", "def __init__(self, D_in, D_out, H, learning_rate):\n self.model = th.nn.Sequential(\n th.nn.Linear(D_in, H),\n th.nn.Tanh(),\n th.nn.Linear(H, D_out),\n ).double()\n\n for x in self.model.modules():\n if isinstance(x, th.nn.Linear):\n x.weight.data = th.normal(means=th.zeros(x.weight.size())).type(dtype)\n x.bias.data = th.zeros(x.bias.size()).type(dtype)\n\n self.H = H\n self.lr = learning_rate\n self.loss_fn = th.nn.MSELoss(size_average=False)", "def make_bangbang_model(d):\n icdict = {'x': 35, 'y': 0}\n # d < 0 => under-damped\n # d > 0 => over-damped\n # d = +/- 0.025 is a good choice\n pardict = {'a': 0.1, 'x0': 35,\n 'S': 0, 'd': d}\n\n DSargs = args()\n DSargs.name = 'saccade_bangbang'\n DSargs.ics = icdict\n DSargs.pars = pardict\n DSargs.tdata = [0, 50]\n DSargs.varspecs = {'x': 'y',\n 'y': 'S -(2*a+d)*y + a*a*(x0-x)'}\n DSargs.fnspecs = {'Jacobian': (['t', 'x', 'y'],\n \"\"\"[[0, 1],\n [-a*a, -(2*a+d)]]\n \"\"\")}\n return Generator.Vode_ODEsystem(DSargs)", "def adjoint(self):\n phi = self.parameters[0]\n dim, _ = self.hyperparameters[\"dimension\"]\n return PCPhase(-1 * phi, dim=dim, wires=self.wires)", "def generate_adjoint_traces(self, s, d, h):\n\n for i in range(h.nr):\n s[:,i] = self.adjoint(s[:,i], d[:,i], h.nt, h.dt)\n\n # apply adjoint filters\n\n # normalize traces\n if PAR.NORMALIZE:\n for ir in range(h.nr):\n w = np.linalg.norm(d[:,ir], ord=2)\n if w > 0: \n s[:,ir] /= w\n\n # mute direct arrival\n if PAR.MUTE:\n vel = PAR.MUTESLOPE\n off = PAR.MUTECONST\n s = smute(s, h, vel, off, constant_spacing=False)\n\n return s", "def generate_automorphisms(g: Graph, h: Graph, d: list[Vertex], i: list[Vertex]):\n\n # Refine the graphs g and h.\n color_refinement([g, h])\n\n # Make sure that the colors are balanced, and check for a bijection.\n if not is_balanced(g, h):\n return\n if is_bijection(g, h):\n\n # Generate the mapping from g -> h.\n p = generate_mapping(g, h)\n\n # If the permutation cannot be generated by this generating set, we need to add it.\n if not 
is_member(generating_set, p):\n generating_set.append(p)\n\n # We can now back to the last trivial ancestor nodes in the branching tree.\n while [v.label for v in d] != [v.label for v in i]:\n # We remove the vertices from d and i and mark them as 'used'.\n # This should prevent the algorithm from trying to re-explore a branch that may be skipped.\n # FIXME: This strategy seems too aggressive, the results are sometimes off by a factor 2 or 4\n d.pop().pre_labeled = True\n i.pop().pre_labeled = True\n\n return\n\n c, next_color = get_c([g, h])\n for v_g in g:\n if v_g.colornum == c:# and not v_g.pre_labeled:\n x = v_g\n break\n\n for v_h in h:\n if v_h.colornum == c and not v_h.pre_labeled:\n g1 = g + Graph(False)\n h1 = h + Graph(False)\n g1.vertices[g.vertices.index(x)].colornum = next_color\n h1.vertices[h.vertices.index(v_h)].colornum = next_color\n d.append(x)\n i.append(v_h)\n generate_automorphisms(g1, h1, d, i)", "def build_posterior(self):\n if self.gp_core.alpha is None:\n self.gp_core.build_posterior()", "def train_mdn_proposal_prior(save=True):\n\n n_iterations = n_bootstrap_iter\n n_data = 500\n\n # read data\n pilot_means, pilot_stds = helper.load(datadir + 'pilot_run_results.pkl')\n obs_stats = helper.load(datadir + 'obs_stats.pkl')\n obs_stats -= pilot_means\n obs_stats /= pilot_stds\n\n # create an mdn\n net = mdn.MDN_SVI(n_inputs=9, n_hiddens=[50], act_fun='tanh', n_outputs=4, n_components=1)\n regularizer = lf.regularizerSvi(net.mps, net.sps, 0.01)\n prior_proposal = None\n\n for iter in xrange(n_iterations):\n\n # generate new data\n params = []\n stats = []\n dist = []\n i = 0\n\n while i < n_data:\n\n prop_params = sim_prior_params() if iter == 0 else np.exp(prior_proposal.gen())[0]\n if np.any(np.log(prop_params) < log_prior_min) or np.any(np.log(prop_params) > log_prior_max):\n continue\n try:\n lv = mjp.LotkaVolterra(init, prop_params)\n states = lv.sim_time(dt, duration, max_n_steps=max_n_steps)\n except mjp.SimTooLongException:\n continue\n\n sum_stats = calc_summary_stats(states)\n sum_stats -= pilot_means\n sum_stats /= pilot_stds\n\n params.append(prop_params)\n stats.append(sum_stats)\n dist.append(calc_dist(sum_stats, obs_stats))\n i += 1\n\n print 'simulation {0}, distance = {1}'.format(i, dist[-1])\n\n params = np.array(params)\n stats = np.array(stats)\n dist = np.array(dist)\n\n # plot distance histogram\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(dist, bins=int(np.sqrt(n_data)))\n ax.set_title('iteration = {0}'.format(iter + 1))\n ax.set_xlim([0.0, 12.0])\n plt.show(block=False)\n\n # train an mdn to give the posterior\n minibatch = 100\n maxiter = int(2000 * n_data / minibatch)\n monitor_every = 100\n trainer = Trainer.Trainer(\n model=net,\n trn_data=[stats, np.log(params)],\n trn_loss=net.mlprob + regularizer / n_data,\n trn_target=net.y\n )\n trainer.train(\n maxiter=maxiter,\n minibatch=minibatch,\n show_progress=True,\n monitor_every=monitor_every\n )\n\n # calculate the approximate posterior\n mdn_mog = net.get_mog(obs_stats)\n approx_posterior = mdn_mog if iter == 0 else mdn_mog / prior_proposal\n prior_proposal = approx_posterior.project_to_gaussian()\n\n # save the net and the approximate posterior\n if save:\n helper.save((net, approx_posterior, prior_proposal, dist), netsdir + 'mdn_svi_proposal_prior_{0}.pkl'.format(iter))", "def model_onelayer_pert(r):\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\tif (r > 6361000.0):\n\t\trho = 2.7\n\t\tvpv = 5.8\n\t\tvph = 
vpv\n\t\tvsv = 2.0 + 0.02\n\t\tvsh = vsv \n\t\teta = 1.0\n\n\telse:\n\t\trho = 3.1\n\t\tvpv = 7.8\n\t\tvph = vpv\n\t\tvsv = 3.0\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N", "def createRigPoseSliderForJoint(self, joint):\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/mainScheme.qss\")\n f = open(styleSheetFile, \"r\")\n self.style = f.read()\n f.close()\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n jointName = joint\n\n if cmds.objExists(joint + \"_mover\"):\n jointName = joint\n\n else:\n\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointName = jointName.partition(nameData[1])[0]\n\n jointName = self.name + \"_\" + jointName\n\n else:\n jointName = self.name + \"_\" + jointName\n\n # create a master vertical layout\n mainLayout = QtWidgets.QVBoxLayout()\n self.rigPose_advancedLayout.addLayout(mainLayout)\n\n # create a label for the joint\n font = QtGui.QFont()\n font.setPointSize(10)\n font.setBold(True)\n jointLabel = QtWidgets.QLabel(joint + \":\")\n jointLabel.setFont(font)\n mainLayout.addWidget(jointLabel)\n\n # create layout for slider/button\n layout = QtWidgets.QHBoxLayout()\n mainLayout.addLayout(layout)\n\n # create slider for joint\n slider = QtWidgets.QSlider()\n layout.addWidget(slider)\n slider.setProperty(\"name\", joint)\n slider.setOrientation(QtCore.Qt.Horizontal)\n slider.setRange(0, 100)\n slider.setSingleStep(1)\n slider.valueChanged.connect(partial(self.setReferencePoseSlider, jointName + \"_mover\"))\n slider.setTracking(False)\n self.overallSlider.valueChanged.connect(slider.setValue)\n\n # create reset button\n button = QtWidgets.QPushButton(\"Reset\")\n button.setMinimumWidth(70)\n button.setMaximumWidth(70)\n layout.addWidget(button)\n button.setObjectName(\"blueButton\")\n button.setStyleSheet(self.style)\n\n button.clicked.connect(partial(self.resetRigPose_Part, jointName + \"_mover\"))", "def build_posterior(self):\n raise NotImplementedError('Abstract Method')", "def prob4(d = 500): \n #import the plane data\n planeData = np.load(\"plane.npy\")\n \n tplane = planeData[:,0]\n alpha = np.deg2rad(planeData[:,1])\n beta = np.deg2rad(planeData[:,2])\n \n l = len(tplane)\n \n #define x and y functions\n def x(n):\n# Gives x position\n return d * np.tan(beta[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n def y(n):\n# Gives y position\n return d * np.tan(beta[n]) * np.tan(alpha[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n \n #define x and y prime as we will see them\n def xprime(n):\n# Gives the approximate derivative of x\n if n == 0:\n return fdq1(x, n, h = 1)\n elif n == l-1:\n return bdq1(x, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(x, n, h = 1)\n else:\n return 0\n \n def yprime(n):\n# Gives the approximate derivative of y\n if n == 0:\n return fdq1(y, n, h = 1)\n elif n == l-1:\n return bdq1(y, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(y, n, h = 1)\n else:\n return 0\n \n #define speed from x and y prime\n def speed(n):\n# 
print(\"speed(n) where n = \" + str(n))\n return np.sqrt((xprime(n))**2 + (yprime(n))**2)\n \n #Finally get the speed from the information we have\n spd = []\n X = []\n Y = []\n for i in range(0, l):\n spd.append(speed(i))\n X.append(x(i))\n Y.append(y(i))\n \n return spd\n \n raise NotImplementedError(\"Problem 4 Incomplete\")", "def __set_dh_params(self, joints):\n self.dh_params = {}\n\n for i in range(len(joints)):\n self.dh_params[self.alpha[i]] = joints[i].alpha\n\n self.dh_params[self.a[i]] = joints[i].a\n\n if joints[i].structure == 'revolute':\n self.dh_params[self.q[i]] = self.q[i]\n self.dh_params[self.d[i]] = joints[i].d\n\n elif joints[i].structure == 'prismatic':\n self.dh_params[self.q[i]] = joints[i].q\n self.dh_params[self.d[i]] = self.d[i]\n\n elif joints[i].structure == 'constant':\n self.dh_params[self.q[i]] = joints[i].q\n self.dh_params[self.d[i]] = joints[i].d\n\n self.__set_transform_matrices()", "def ID3(self,data,classData,featureNames, parentMajority):\n\t\t\n\t\tnData = len(data)\n\t\tnClasses = len(classData)\n\n\t\t# base case 1: if D is empty, return the parentMajority class\n\t\tif nData==0 and nClasses==0:\n\t\t\t return parentMajority\n\n\t\t# get the number of features\n\t\tnFeatures = 0\n\t\tif nData != 0:\n\t\t\tnFeatures = len(data[0])\n\t\t\t\n\t\t# find the majority of target value\n\t\tmajority = self.majority_class(classData)\n\n\t\t# base case 2: if d is empty (no features), return the majority class\n\t\tif nFeatures == 0 :\n\t\t\treturn majority\n\n\t\t# base case 3: if all instances have the same target value, return the first target value\n\t\telif classData.count(classData[0]) == nData:\n\t\t\treturn classData[0]\n\t\t\n\t\t# general case to recursively build the tree\n\t\telse:\n\n\t\t\t# Choose the best feature based on information gain\n\t\t\tgain = np.zeros(nFeatures)\n\t\t\tfor feature in range(nFeatures):\n\t\t\t\tgain[feature] = self.info_gain(data,classData,feature)\n\t\t\tbestFeature = np.argmax(gain)\n\t\t\tbestFeatureName = featureNames[bestFeature]\n\t\t\t\n\t\t\ttree = {bestFeatureName:{}}\n\t\t\t#print \"The tree %s afer the best feature %s\" % (tree, bestFeatureName)\n\n\t\t\t# Load the bestFeature's possible values into a list\n\t\t\tvalues = []\n\t\t\tfor i in range(len(self.featureValues[bestFeatureName])):\n\t\t\t\tvalues.append(self.featureValues[bestFeatureName][i])\n\t\t\t#print \"The best feature %s values %s\" % (bestFeatureName, str(values))\n\n\t\t\t# Partition the original datapoints based on the best feature possible values\n\t\t\t# and then recursively invoke ID algorithm to build subtrees\n\t\t\tfor value in values:\n\t\t\t\tnewData = []\n\t\t\t\tnewClassData = []\n\t\t\t\tindex = 0\n\n\t\t\t\t# partition the data\n\t\t\t\tfor datapoint in data:\n\t\t\t\t\tif datapoint[bestFeature]==value:\n\t\t\t\t\t\tif bestFeature==0:\n\t\t\t\t\t\t\tnewdatapoint = datapoint[1:]\n\t\t\t\t\t\t\tnewNames = featureNames[1:]\n\t\t\t\t\t\telif bestFeature==nFeatures:\n\t\t\t\t\t\t\tnewdatapoint = datapoint[:-1]\n\t\t\t\t\t\t\tnewNames = featureNames[:-1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnewdatapoint = datapoint[:bestFeature]\n\t\t\t\t\t\t\tnewdatapoint.extend(datapoint[bestFeature+1:])\n\t\t\t\t\t\t\tnewNames = featureNames[:bestFeature]\n\t\t\t\t\t\t\tnewNames.extend(featureNames[bestFeature+1:])\n\n\t\t\t\t\t\tnewData.append(newdatapoint)\n\t\t\t\t\t\tnewClassData.append(classData[index])\n\t\t\t\t\tindex += 1\n\n\t\t\t\t# Now do recursive call to build the subtrees\n\t\t\t\tsubtree = self.ID3(newData,newClassData,newNames, 
majority)\n\n\t\t\t\t# Add the subtree on to the tree\n\t\t\t\t#print \"The subtree %s for the current tree %s\" % ( subtree, tree,)\n\t\t\t\ttree[bestFeatureName][value] = subtree\n\n\t\t\treturn tree", "def make_dynamics_and_expert(key, state_dim, p, eta, activation):\n\n teacher_hidden_width = 32\n\n w_teacher_init = hk.initializers.RandomNormal(stddev=0.5)\n # b_teacher_init = hk.initializers.RandomNormal()\n\n # no bias, so that h(0) = 0\n def teacher_policy(state):\n mlp = hk.Sequential([\n hk.Linear(\n teacher_hidden_width, w_init=w_teacher_init, with_bias=False),\n activation,\n hk.Linear(\n teacher_hidden_width, w_init=w_teacher_init, with_bias=False),\n activation,\n hk.Linear(state_dim, w_init=w_teacher_init, with_bias=False),\n ])\n return mlp(state)\n\n teacher_policy_t = hk.without_apply_rng(hk.transform(teacher_policy))\n teacher_params = teacher_policy_t.init(key, jnp.zeros((state_dim,)))\n\n def h_disturbance(x):\n return teacher_policy_t.apply(teacher_params, x)\n\n assert np.allclose(h_disturbance(np.zeros((state_dim,))),\n np.zeros((state_dim,)))\n\n def dynamics(x, u):\n f = x - eta * x * (jnp.abs(x) ** p) / (1 + (jnp.abs(x)**p))\n g = eta / (1 + (jnp.abs(x)**p)) * (h_disturbance(x) + u)\n return f + g\n\n def expert_policy(state):\n return -h_disturbance(state)\n\n return dynamics, expert_policy", "def predict(self, x):\n\n x_ = self.scaler_s1.transform(x)\n s1_df = self.basemodel.decision_function(x_)\n s2_df = self.confidencemodel.decision_function(x_)\n\n '''\n hc_df = np.max(dfs, 1)\n\n # high confidence vector\n hc_df[(dfs > 0).sum(1) > 1] = np.min(dfs, 1)[(dfs > 0).sum(1) > 1]\n '''\n\n hc_df = -np.ones((s2_df.shape[0], 1))\n y_pred1 = self.basemodel.predict(x_)\n\n unique_labels = range(s2_df.shape[1])\n for label in unique_labels:\n hc_df[y_pred1 == label, :] = s2_df[y_pred1 == label, label][:, np.newaxis]\n\n '''\n # TODO modif this to be compatible with multilabel classifiers (currently only binary classifier)\n xor_mask = np.logical_xor(s2_df[:, 0] > 0, s2_df[:, 1] > 0)\n\n if xor_mask.sum() > 0:\n hc_df[xor_mask, 0] = np.max(s2_df, 1)[xor_mask]\n hc_df[~xor_mask, 0] = -np.abs(np.max(s2_df, 1)[~xor_mask])\n #hc_labels = np.greater(s2_df[:, 1], s2_df[:, 0]).astype(int)\n '''\n\n hit_proba_estimate = self.hitproba.predict(x_)\n joint_hc = self.joint_class_hc.predict(x_)\n\n data_array = []\n dict_array = []\n if len(joint_hc.shape):\n\n dict_array = {'s1_df': s1_df[:, np.newaxis],\n 'hcdf': hc_df,\n 's2_df': s2_df,\n 'hitproba': hit_proba_estimate[:, np.newaxis],\n 'hcjoint': joint_hc[:, np.newaxis],\n 's1_hat': self.ohe.transform((s1_df[:, np.newaxis]>0).astype(int)),\n 's2_hat': ((self.ohe.transform((s1_df.reshape(-1, 1)>0).astype(int))>0) & (s2_df>0)).astype(int),\n }\n data_array = np.hstack(\n [s1_df[:, np.newaxis], hc_df, s2_df, hit_proba_estimate[:, np.newaxis], joint_hc[:, np.newaxis]])\n else:\n # multiclass\n data_array = np.hstack([s1_df, hc_df, s2_df, hit_proba_estimate, joint_hc])\n dict_array = {'s1_df': s1_df,\n 'hc_df': hc_df,\n 's2_df': s2_df,\n 'hitproba': hit_proba_estimate,\n 'hcjoint': joint_hc,\n 's1_hat': self.ohe.transform((s1_df > 0).astype(int)),\n 's2_hat': ((self.ohe.transform((s1_df > 0).astype(int)) > 0) & (s2_df > 0)).astype(int)\n }\n\n return data_array, dict_array", "def generate_from_tensor_and_model(naive_model: KBModelM1, input_path: str, debug: bool = False) -> 'KBModelM2':\n # the list of adjacency matrices of the object property relations created in load_tensor\n relation_adjaceny_matrices = 
load_graph_npz(input_path)\n\n # dictionary pointing from a relation id to the functionality score\n # this functionality score is the average number of outgoing edges an entity has of this relation type given\n # that it has any outgoing edges of this relation type\n functionalities = {}\n\n # dictionary pointing from a relation id to the inverse functionality score\n # this inverse functionality score is the average number of incoming edges an entity has of this relation type\n # given that it has any incoming edges of this relation type\n inverse_functionalities = {}\n\n # dictionary pointing from a relation id to a boolean indicating if this relation has any reflexive edges\n relation_id_to_reflexiveness = {}\n\n # dictionary pointing from a relation id to its density\n # the density says how clustered the edges are around specific nodes\n # the lowest possible density is sqrt(num_edges_of_relation_type) and it means that every edge is between\n # a different subject and object than the other edges, i.e. an entity can appear only once as subject and once\n # as object for this relation type\n # the highest possible density is 1.0 and it means that the edges of this relation type have the minimum amount\n # of entities as subjects and objects needed to have that many edges (e.g., we have 1000 relations they start\n # at 1 entity (subject) and go to 1000 other entities (objects)\n relation_id_to_density = {}\n\n # dictionary pointing from relation id to a count of how many different subjects appear with this relation\n relation_id_to_distinct_subjects = {}\n\n # dictionary pointing from relation id to a count of how many different objects appear with this relation\n relation_id_to_distinct_objects = {}\n\n # iterate over the adjacency matrix of each relation type\n # the index of each matrix is the id of the relation type\n # the rows of each matrix contain the ids of the subject of the relation\n # the columns of each matrix contain the ids of the object of the relation\n print(f\"Learning advanced relation distributions...\")\n for relation_id in tqdm(range(len(relation_adjaceny_matrices))):\n adjacency_matrix = relation_adjaceny_matrices[relation_id]\n\n # how often an entity id appears as subject in a relation\n # axis = 1 sums the row values\n subject_frequencies = csr_matrix(adjacency_matrix.sum(axis=1))\n\n # how often an entity id appears as object in a relation\n # axis = 0 sums the column values\n object_frequencies = csr_matrix(adjacency_matrix.sum(axis=0))\n\n # the number of different (distinct) entities that appear as subject/object\n num_distinct_subjects = subject_frequencies.nnz\n num_distinct_objects = object_frequencies.nnz\n relation_id_to_distinct_subjects[relation_id] = num_distinct_subjects\n relation_id_to_distinct_objects[relation_id] = num_distinct_objects\n\n # the number of edges of this relation type divided by the product of the number of distinct entities\n # that are subjects and the number of distinct entities that are objects of this relation\n density_score = float(adjacency_matrix.nnz) / (num_distinct_subjects * num_distinct_objects)\n relation_id_to_density[relation_id] = density_score\n\n # the average number of outgoing edges an entity has of this relation type given that it has any outgoing\n # edges of this relation type\n functionalities[relation_id] = float(subject_frequencies.sum()) / num_distinct_subjects\n\n # the average number of incoming edges an entity has of this relation type given that it has any incoming\n # edges of this relation 
type\n inverse_functionalities[relation_id] = float(object_frequencies.sum()) / num_distinct_objects\n\n # True if any reflexive edge exists in the adjacency matrix\n relation_id_to_reflexiveness[relation_id] = adjacency_matrix.diagonal().any()\n\n owl_model = KBModelM2(\n m1_model=naive_model,\n functionalities=functionalities,\n inverse_functionalities=inverse_functionalities,\n relation_id_to_density=relation_id_to_density,\n relation_id_to_distinct_subjects=relation_id_to_distinct_subjects,\n relation_id_to_distinct_objects=relation_id_to_distinct_objects,\n relation_id_to_reflexiveness=relation_id_to_reflexiveness)\n\n return owl_model", "def build_posterior(self, q):\n v = q\n if hasattr(q, '__iter__'):\n if hasattr(q[0], '__iter__'):\n if len(q[0]) is not self.N:\n raise ValueError('Specified coordinates have incorrect dimensionality')\n elif self.N is 1:\n v = [(k,) for k in q]\n else:\n raise ValueError('The number of specified points must be greater than 1')\n else:\n raise ValueError('The number of specified points must be greater than 1')\n\n\n lengths = self.s * self.scale_lengths\n K_qx = self.matrix(v, self.x, lengths)\n K_qq = self.matrix(v, v, lengths)\n self.mu = dot(K_qx, self.H)\n self.sigma = K_qq - dot( K_qx, solve( self.K_xx, K_qx.T ) )\n return self.mu, self.sigma", "def order_joint_states(self, data):\n ordered_joint = []\n \n # Create a dictionary that contains joint name and position \n # starting from a JointState msg\n joints_dictionary = dict(zip(data.joint_state.name, data.joint_state.position))\n \n # helper variable \n suffix = 'FJ0' \n\n # For each joint name, look for the corresponding value in the joints_dictionary\n for key in [\"FFJ0\", \"FFJ3\", \"FFJ4\",\n \"MFJ0\", \"MFJ3\", \"MFJ4\",\n \"RFJ0\", \"RFJ3\", \"RFJ4\",\n \"LFJ0\", \"LFJ3\", \"LFJ4\", \"LFJ5\",\n \"THJ1\", \"THJ2\", \"THJ3\", \"THJ4\", \"THJ5\",\n \"WRJ1\", \"WRJ2\" ]:\n \n # Check if the key contains \"FJ0\": \"FFJ0\", \"MFJ0\", \"RFJ0\", \"LFJ0\"\n # This has to be done because for convention ?FJO = ?FJ1 + ?FJ2\n if key[1:] == suffix:\n ordered_joint.append( joint(joint_name = key,\n joint_target = joints_dictionary[key[:1]+\"FJ1\"] + joints_dictionary[key[:1]+\"FJ2\"])) \n else: \n ordered_joint.append( joint(joint_name = key, \n joint_target = joints_dictionary[key]) ) \n return ordered_joint", "def __repr__(self):\n return \"<PID_onject P: %s I: %s D: %s>\"\\\n % (self.K[0], self.K[1], self.K[2])", "def __init__(self, model, line, segments = None, influence = None, \r\n strength = 1, variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n self.line_raw = copy.copy(line)\r\n \r\n if segments is None:\r\n \r\n self.segments = line.shape[0]-1\r\n \r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n \r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = 
self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # --------------------------------------------------------------------- \r\n \r\n \r\n \r\n \r\n self.strength = np.ones(self.segments)*strength\r\n \r\n if influence is None:\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n \r\n self.Zi = []\r\n self.offset_outside = []\r\n self.L = []\r\n self.zc = []\r\n self.segment_nvec = []\r\n self.head_target = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n influence_pt = (self.line_c[seg+1]-self.line_c[seg])*self.influence/self.L[seg] + self.line_c[seg]\r\n Z = (2*influence_pt-(self.line_c[seg]+self.line_c[seg+1]))/(self.line_c[seg+1]-self.line_c[seg])\r\n self.Zi += [copy.copy(Z)]\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n part1 = np.nan_to_num((Z+1)*np.log(Z+1))\r\n part2 = np.nan_to_num((Z-1)*np.log(Z-1))\r\n self.offset_outside += [self.L[seg] / (4*np.pi) * (part1 - part2)]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def caculate_prob(self):\n t_H = self.tree.depth()\n t_h = 1\n while(t_h <= t_H):\n t_hnodes = self.get_h(t_h)\n t_sum = 0\n t_hpro = []\n t_cpro = []\n for t_n in t_hnodes:\n t_sum = self.tree.get_node(t_n).data[0] + t_sum\n t_node = self.tree.get_node(t_n)\n if t_node.is_leaf():\n t_node.data.append(0)\n continue\n t_childrens = self.tree.children(t_n)\n t_shang = 0\n for child in t_childrens:\n t_shang = t_shang + (child.data[0]/t_node.data[0])*np.log(child.data[0]/t_node.data[0])\n t_node.data.append(-t_shang)\n for t_n in t_hnodes:\n t_node = self.tree.get_node(t_n)\n t_parentnode = self.tree.parent(t_n)\n if t_h > 1:\n t_node.data.append((t_node.data[0] / t_sum) * (t_node.data[0]/t_parentnode.data[0]))\n t_hpro.append((t_node.data[0]/t_sum) * (t_node.data[0]/t_parentnode.data[0]))\n else:\n t_node.data.append((t_node.data[0] / t_sum))\n t_hpro.append((t_node.data[0] / t_sum))\n\n t_cpro.append(t_node.data[1])\n t_ndata = np.array(t_hpro)\n mean = np.mean(t_ndata)\n std = np.std(t_ndata,ddof=1)\n t_sdata = np.array(t_cpro)\n mean_s = np.mean(t_sdata)\n std_s = np.std(t_sdata,ddof=1)\n for t_n in t_hnodes:\n t_node = self.tree.get_node(t_n)\n if(std != 0):\n t_node.data[2] = (t_node.data[2] - mean)/std\n else:\n t_node.data[2] = (t_node.data[2] - 
mean)\n if(mean_s == 0 and std_s ==0):\n t_node.data[1] = -100.0\n continue\n t_node.data[1] = (t_node.data[1] - mean_s)/std_s\n t_h = t_h + 1", "def update_mp(self, obs, pool):\n #######################################\n # Step 1 - prediction for birth targets\n born = [deepcopy(comp) for comp in self.birthgmm]\n # The original paper would do a spawning iteration as part of Step 1.\n spawned = [] # not implemented\n\n #######################################\n # Step 2 - prediction for existing targets\n updated = [GmphdComponent(self.survival * comp.weight, dot(self.f, comp.loc),\n self.q + dot(dot(self.f, comp.cov), self.f.T), comp.id)\n for comp in self.gmm]\n\n predicted = born + spawned + updated\n\n #######################################\n # Step 3 - construction of PHD update components\n # These two are the mean and covariance of the expected observation\n nu = [dot(self.h, comp.loc) for comp in predicted]\n s = [self.r + dot(dot(self.h, comp.cov), self.h.T) for comp in predicted]\n # Not sure about any physical interpretation of these two...\n k = [dot(dot(comp.cov, self.h.T), linalg.inv(s[index]))\n for index, comp in enumerate(predicted)]\n pkk = [dot(eye(len(k[index])) - dot(k[index], self.h), comp.cov)\n for index, comp in enumerate(predicted)]\n\n #######################################\n # Step 4 - update using observations\n # The 'predicted' components are kept, with a decay\n newgmm = [GmphdComponent(comp.weight * (1.0 - self.detection), comp.loc, comp.cov, comp.id)\n for comp in predicted]\n\n # then more components are added caused by each obsn's interaction with existing component\n result = pool.map_async(partial(self.update_obs_mp, predicted=predicted, nu=nu, s=s, pkk=pkk, k=k), obs)\n result = result.get()\n for newgmmpartial in result:\n newgmm.extend(newgmmpartial)\n\n self.gmm = newgmm", "def __init__(self,model,alpha=0,head_min=0,head_max=1,k=1,\r\n variables=[],priors=[]):\r\n \r\n import numpy as np\r\n \r\n # Append the base to the elementlist\r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n # Set orientation value\r\n self.alpha = alpha\r\n \r\n # Set potential scaling variables\r\n self.head_min = head_min\r\n self.head_max = head_max\r\n \r\n # Assign the hydraulic conductivity of the base model\r\n self.k = k\r\n \r\n # The model requires the base flow in terms of hydraulic potential (phi)\r\n # The function head_to_potential extracts the following variables:\r\n # phi_min hydraulic potential corresponding to head_min\r\n # phi_max hydraulic potential corresponding to head_max\r\n self.head_to_potential()\r\n \r\n # Check input for validity\r\n self.check_input()\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.variables += [var]\r\n self.model.priors += [self.priors[idx]]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def __init__(self, dim_hv, dim_hw, msg_dim):\n super(PairMessageGenerator, self).__init__()\n self.dim_hv, self.dim_hw, self.msg_dim = dim_hv, dim_hw, msg_dim\n self.in_dim = dim_hv + dim_hw # row * feature_dim, 2048\n self.mlp = nn.Sequential(\n nn.LayerNorm(self.in_dim), # this layer norm is important to create diversity\n nn.Linear(self.in_dim, self.msg_dim),\n 
nn.LeakyReLU(0.2)\n )", "def __init__(self):\n super(PriProb, self).__init__()\n # initialize R: distribute R_TOTAL reward points in J_avi locations randomly\n # self.r preserved for debugging, no real use in the script\n self.r = np.array(ad.randint_upto_sum(R_TOTAL, J_avi)).astype(NP_DTYPE)\n\n # expand self.r from J_avi locations to J locations using is_avi\n self.r_exp = np.zeros((J), dtype=NP_DTYPE)\n self.r_exp[np.nonzero(is_avi.cpu().numpy())] = self.r\n\n #normalizedR = ad.normalize(self.r_exp, using_max=False)\n self.R = nn.Parameter(torch.from_numpy(self.r_exp))", "def create_om_problem(prob):\n ivc = om.IndepVarComp()\n\n # Add subsystems to problem ##\n add_subsystems(prob, ivc)\n\n # Defining problem parameters ##\n add_parameters(prob, ivc)\n\n # Setting up the problem options ##\n driver_setup(prob)\n\n # Setup the model hierarchy for OpenMDAO ##\n prob.setup()", "def learn(self, D, **kwargs):\n pass", "def new(self):\n self.labels = ((torch.empty((self.P, 1)).random_(0, 2) - .5) * 2)\n self.D = torch.empty((self.P, self.d)).normal_()\n\n if not self.big:\n self.labels = self.labels.cuda()\n self.D = self.D.cuda()\n\n torch.save(self.D, self.main_dir + '/data/D0.pt')\n torch.save(self.labels, self.main_dir + '/data/labels.pt')", "def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def _getModelPosterior(self, min):\n Sigma = self._getLaplaceCovar(min)\n n_params = self.vd.getNumberScales()\n ModCompl = 0.5 * n_params * \\\n sp.log(2 * sp.pi) + 0.5 * sp.log(sp.linalg.det(Sigma))\n RV = min['LML'] + ModCompl\n return RV", "def FK_dh(dh_params, joint_angles, link):\n pass", "def initJoints(self):\n self.joints = list(self.tree.nodes)", "def define_gan(g_model, d_model):\r\n # make weights in the discriminator (some shared with the q model) as not trainable\r\n d_model.trainable = False\r\n # connect g outputs to d inputs\r\n d_output = d_model(g_model.output)\r\n # define 
composite model\r\n model = Model(g_model.input, d_output)\r\n # compile model\r\n opt = Adam(lr=0.0001, beta_1=0.5)\r\n model.compile(loss=['binary_crossentropy'], optimizer=opt)\r\n return model", "def p(self):\n return hlp.parms(self.y(0))", "def PrisonersDilemma(R=-2, P=-4, S=-5, T=0):\n if not (T > R > P > S):\n raise TypeError(\"the input values for a Prisoners Dilemma must be of the form T > R > P > S\")\n from sage.matrix.constructor import matrix\n A = matrix([[R, S], [T, P]])\n g = NormalFormGame([A, A.transpose()])\n g.rename('Prisoners dilemma - ' + repr(g))\n return g", "def clfqp(self,x,p):\n alp = self.alp_opt\n nu = self.nu_opt\n dt = self.dt\n n = self.n\n I = np.identity(n)\n M = self.ncm(x,p)\n nu = np.size(self.h_or_g(x,p),1)\n u = cp.Variable((nu,1))\n e = np.reshape(x,(n,1))\n fx = np.reshape(self.dynamicsf(x,p),(n,1))\n gx = self.h_or_g(x,p)\n dMdt = (nu*I-M)/dt\n constraints = [2*e.T@(fx+gx@u)+e.T@dMdt@e <= -2*alp*e.T@M@e]\n prob = cp.Problem(cp.Minimize(cp.sum_squares(u)),constraints)\n prob.solve()\n u = u.value\n u = np.ravel(u)\n return u", "def prob4():\n #set up the matrices\n solvers.options['show_progress'] = False\n Q = matrix(np.array([[3., 2.,1.],[2.,4.,2.],[1., 2., 3.]]))\n r = matrix([3.,0., 1.])\n #solve the matrices\n sol=solvers.qp(Q, r)\n return np.ravel(sol['x']), sol['primal objective']", "def __init__(self, D_in, H, D_out):\n super(SimpleNet, self).__init__()\n self.linear1 = torch.nn.Linear(D_in, H)\n self.linear2 = torch.nn.Linear(H, D_out)", "def dLdp(C1s,C0s,ks,bs,sigma=1):\n # return np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma))\n \n # A = FIM(q,ps,C1s,C0s,ks,bs,sigma)\n \n # Construct A(q,ps)\n A = FIM(C1s,C0s,ks,bs,sigma)\n\n # Construct dAdp(q,ps)\n dAdp = jit(jacfwd(A,argnums=1))\n \n # Construct inv_A(q,ps)\n inv_A=lambda q,ps: jnp.linalg.inv(A(q,ps))\n \n # print(np.trace(-dAinv(inv_A,dAdp),axis1=0,axis2=1)-np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma)))\n \n # Construct dLdP(q,ps)\n\n\n\n return lambda q,ps: -np.array(jnp.trace(dAinv(inv_A(q,ps),dAdp(q,ps)),axis1=0,axis2=1))", "def d_dp(self, points):\n d_dp = self.model.components.reshape(self.model.n_active_components,\n -1, self.n_dims)\n return d_dp.swapaxes(0, 1)", "def predict_proba(self):\n ...", "def gor4(self):\n return _GOR4.predict(self.sequence)", "def train(self, X, y, print_progress=False):\n N, D = X.shape\n #yy = np.array(y.cpu(), dtype = np.dtype(float))\n y = y.float()\n\n # Create kernel matrix K\n #t1 = time.time()\n K = torch.zeros((N, N), device=X.device)\n for i in range(N):\n for j in range(N):\n if j>i:\n kk = self.kernel(X[i,:], X[j,:])\n K[i,j] = kk\n K[j,i] = kk\n elif j==i:\n K[i,j] = self.kernel(X[i,:], X[j,:])\n #t_k = time.time() - t1\n #print(\"t_k = \", t_k)\n print(\"start QP...\")\n \n # Using qpth =========================\n# # Set up QP problem\n# Q = torch.ger(y, y) * K + self.eps*torch.eye(N, device=X.device) #torch.outer=torch.ger\n# p = -torch.ones(N, device=X.device)\n# A = torch.reshape(y, (1,N)) # reshape as 2D\n# b = torch.zeros(1, device=X.device)\n \n# if self.C is None:\n# G = torch.diag(-torch.ones(N, device=X.device))\n# h = torch.zeros(N, device=X.device)\n# #print(\"G\", G.dtype, \"h\", h.dtype)\n# else:\n# G = torch.vstack((torch.diag(-torch.ones(N, device=X.device)), torch.eye(N, device=X.device)))\n# h = torch.hstack((torch.zeros(N, device=X.device), torch.ones(N, device=X.device)*self.C/N))\n# #print(\"G\", G.dtype, \"h\", h.dtype)\n \n# # Solve alpha by QP\n# #t2 = time.time()\n# solution = 
qpth.qp.QPFunction(verbose=print_progress)(Q, p, G, h, A, b)\n# alpha = solution.view(-1) # reshape as 1D\n# #t_qp = time.time() - t2\n# #print(\"t_qp = \", t_qp)\n \n # Using cvxopt ======================\n # Set up QP problem\n K = np.array(K, dtype=np.float64)\n yy = np.array(y, dtype=np.float64)\n \n P = cvxopt.matrix(np.outer(yy, yy) * K)\n q = cvxopt.matrix(-np.ones(N))\n A = cvxopt.matrix(yy, (1,N)) # reshape as 2D\n b = cvxopt.matrix(0.0)\n #print(K[1:5,1:5],P[1:5,1:5])\n \n if self.C is None:\n G = cvxopt.matrix(np.diag(-np.ones(N)))\n h = cvxopt.matrix(np.zeros(N))\n else:\n G = cvxopt.matrix(np.vstack((np.diag(-np.ones(N)), np.identity(N))))\n h = cvxopt.matrix(np.hstack((np.zeros(N), np.ones(N)*self.C/N)))\n \n # Solve alpha by QP\n cvxopt.solvers.options['show_progress'] = print_progress\n solution = cvxopt.solvers.qp(P, q, G, h, A, b)\n alpha = torch.tensor(np.ravel(solution['x']))\n K = torch.tensor(K)\n # =======================================\n \n # Save support vectors\n isSV = alpha>1e-5\n idx = torch.arange(alpha.shape[0])[isSV]\n self.alpha = alpha[isSV]\n self.sv = X[isSV]\n self.sv_y = y[isSV]\n #print(\"%d support vectors out of %d points\" % (len(self.alpha), N))\n \n # Calculate and save parameter b\n self.b = torch.sum(self.sv_y)\n for r in range(len(self.alpha)):\n self.b -= torch.sum(self.alpha * self.sv_y * K[idx[r], isSV])\n self.b = self.b / len(self.alpha)", "def joint_call(self):\n import features_pssm\n class_values = self.call_class()\n dssp_chain_A, dssp_chain_B, dssp_chain_comp_A, dssp_chain_comp_B = self.call_dssp()\n binana_features = self.call_binana(self.autodock, self.autodock_2)\n self.call_pssm()\n pssm_output_A = \"output/pssm_\" + self.pdb_name + \"_\" + self.chains[0] + \".pssm\"\n jsd_values_A = features_pssm.joint_call(pssm_output_A)\n pssm_output_B = \"output/pssm_\" + self.pdb_name + \"_\" + self.chains[1] + \".pssm\"\n jsd_values_B = features_pssm.joint_call(pssm_output_B)\n #move_to_folder()\n return class_values, dssp_chain_A, dssp_chain_B, dssp_chain_comp_A, dssp_chain_comp_B, binana_features, jsd_values_A, jsd_values_B", "def _auto_influence(self, mod, rigid, pair_blend):\n\n constraint = rigid.sibling(type=\"rdConstraint\")\n\n # This is fine (but what does it mean? :O )\n if not constraint:\n return\n\n def bake_joint_orient(mat, orient):\n \"\"\"Bake jointOrient values\n\n Such that keyframes can be made without\n taking those into account. E.g. a joint with 0 rotate\n but 45 degrees of jointOrient should only require a key\n with 0 degrees.\n\n \"\"\"\n\n assert isinstance(mat, cmdx.om.MMatrix)\n assert isinstance(orient, cmdx.om.MQuaternion)\n\n mat_tm = cmdx.om.MTransformationMatrix(mat)\n new_quat = mat_tm.rotation(asQuaternion=True) * orient\n mat_tm.setRotation(new_quat)\n\n return mat_tm.asMatrix()\n\n transform = rigid.parent()\n\n joint_orient = self._cache[(transform, \"jointOrient\")]\n\n # pairBlend directly feeds into the drive matrix\n compose = mod.create_node(\"composeMatrix\", name=\"composePairBlend\")\n mod.connect(pair_blend[\"inTranslate1\"], compose[\"inputTranslate\"])\n mod.connect(pair_blend[\"inRotate1\"], compose[\"inputRotate\"])\n\n # A drive is relative the parent frame, but the pairblend is relative\n # the parent Maya transform. 
In case these are not the same, we'll\n # map the pairblend into the space of the parent frame.\n parent_rigid = constraint[\"parentRigid\"].connection()\n\n # Could be connected to a scene too\n if parent_rigid.type() != \"rdRigid\":\n return\n\n relative = mod.create_node(\"multMatrix\", name=\"makeRelative\")\n\n # From this space..\n parent_transform_matrix = rigid[\"inputParentInverseMatrix\"].asMatrix()\n parent_transform_matrix = parent_transform_matrix.inverse()\n\n # To this space..\n parent_rigid_matrix = parent_rigid[\"cachedRestMatrix\"].asMatrix()\n parent_rigid_matrix = parent_rigid_matrix.inverse()\n\n total_matrix = parent_transform_matrix * parent_rigid_matrix\n total_matrix = bake_joint_orient(total_matrix, joint_orient)\n\n mod.connect(compose[\"outputMatrix\"], relative[\"matrixIn\"][0])\n mod.set_attr(relative[\"matrixIn\"][1], total_matrix)\n\n mod.connect(relative[\"matrixSum\"], constraint[\"driveMatrix\"])\n\n # Keep channel box clean\n mod.set_attr(compose[\"isHistoricallyInteresting\"], False)\n mod.set_attr(relative[\"isHistoricallyInteresting\"], False)", "def conditionalize(prior, conditional, observed):\n\n # construct joint probability table (Step 1 of Master Method)\n joint = PGM2(prior, conditional)\n #print(joint.get_cell(('POX', 'NOSPOTS')))\n\n # update joint probability table after observing value of N1 (Steps 2 and 3 of Master Method)\n joint.update(observed, 1)\n\n # marginalize to get probability distribution for N0 (Step 4 of Master Method)\n posterior = joint.marginalize(0)\n\n return posterior", "def __init__(self, input, n_in, n_out,\r\n W=None, b=None, prob_constraint_on=None):\r\n\r\n # initialize weight matrix W\r\n if W is None:\r\n self.W = theano.shared(\r\n value=np.zeros((n_in, n_out), dtype=theano.config.floatX),\r\n name='W')\r\n else:\r\n self.W = W\r\n\r\n # initialize bias b\r\n if b is None:\r\n self.b = theano.shared(\r\n value=np.zeros((n_out,), dtype=theano.config.floatX),\r\n name='b')\r\n else:\r\n self.b = b\r\n\r\n # compute prediction\r\n # the linear output\r\n lin_output = T.dot(input, self.W) + self.b\r\n \r\n if prob_constraint_on == None:\r\n #### we do not use those probability constraints\r\n self.y_pred = Sigmoid(lin_output)\r\n\r\n elif prob_constraint_on == \"top\":\r\n #### We first predict the probability of each class using softmax.\r\n # We then weight those probabilities by multiplying them by the\r\n # probability of their parent in the Galaxy Zoo Decision Tree.\r\n \r\n # class 1\r\n prob_Class1 = SoftMax(lin_output[:,0:3])\r\n \r\n # class 2\r\n prob_Class2 = SoftMax(lin_output[:,3:5])\r\n # weight these probabilities using the probability of class 1.2\r\n prob_Class2 *= T.shape_padright(prob_Class1[:,1])\r\n \r\n # class 3\r\n prob_Class3 = SoftMax(lin_output[:,5:7])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class3 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 4\r\n prob_Class4 = SoftMax(lin_output[:,7:9])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class4 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 5\r\n prob_Class5 = SoftMax(lin_output[:,9:13])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class5 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 6\r\n prob_Class6 = SoftMax(lin_output[:,13:15])\r\n \r\n # class 7\r\n prob_Class7 = SoftMax(lin_output[:,15:18])\r\n # weight these probabilities using the probability of class 1.1\r\n prob_Class7 *= T.shape_padright(prob_Class1[:,0])\r\n 
\r\n # class 8\r\n prob_Class8 = SoftMax(lin_output[:,18:25])\r\n # weight these probabilities using the probability of class 6.1\r\n prob_Class8 *= T.shape_padright(prob_Class6[:,0])\r\n \r\n # class 9\r\n prob_Class9 = SoftMax(lin_output[:,25:28])\r\n # weight these probabilities using the probability of class 2.1\r\n prob_Class9 *= T.shape_padright(prob_Class2[:,0])\r\n \r\n # class 10\r\n prob_Class10 = SoftMax(lin_output[:,28:31])\r\n # weight these probabilities using the probability of class 4.1\r\n prob_Class10 *= T.shape_padright(prob_Class4[:,0])\r\n \r\n # class 11\r\n prob_Class11 = SoftMax(lin_output[:,31:37])\r\n # weight these probabilities using the probability of class 4.1\r\n prob_Class11 *= T.shape_padright(prob_Class4[:,0])\r\n \r\n # concatenate all the probabilities into a single tensor variable\r\n self.y_pred = T.concatenate(\r\n [prob_Class1, prob_Class2, prob_Class3, prob_Class4,\r\n prob_Class5, prob_Class6, prob_Class7, prob_Class8,\r\n prob_Class9, prob_Class10, prob_Class11], axis=1)\r\n elif prob_constraint_on == \"down\":\r\n #### we use those probability constraints\r\n \r\n # the following probabilities should sum up to 1, so we use SoftMax\r\n # to predict all of them\r\n ind1 = [2, 8, 15, 16, 17, 25, 26, 27, 31, 32, 33, 34, 35, 36]\r\n p1 = SoftMax(lin_output[:,ind1])\r\n prob_Class1_3 = p1[:,0]\r\n prob_Class4_2 = p1[:,1]\r\n prob_Class7 = p1[:,2:5]\r\n prob_Class9 = p1[:,5:8]\r\n prob_Class11 = p1[:,8:14]\r\n \r\n prob_Class4_1 = T.sum(prob_Class11, axis=1)\r\n prob_Class2_1 = T.sum(prob_Class9, axis=1)\r\n prob_Class2_2 = prob_Class4_1 + prob_Class4_2\r\n prob_Class1_1 = T.sum(prob_Class7, axis=1)\r\n prob_Class1_2 = prob_Class2_1 + prob_Class2_2\r\n prob_Class1 = T.concatenate(\r\n [T.shape_padright(prob_Class1_1),\r\n T.shape_padright(prob_Class1_2),\r\n T.shape_padright(prob_Class1_3)], axis=1)\r\n prob_Class2 = T.concatenate(\r\n [T.shape_padright(prob_Class2_1),\r\n T.shape_padright(prob_Class2_2)], axis=1)\r\n prob_Class4 = T.concatenate(\r\n [T.shape_padright(prob_Class4_1),\r\n T.shape_padright(prob_Class4_2)], axis=1)\r\n \r\n # the following probabilities should sum up to 1, so we use SoftMax\r\n # to predict all of them\r\n ind2 = [14, 18, 19, 20, 21, 24, 23, 24] \r\n p2 = SoftMax(lin_output[:,ind2])\r\n prob_Class6_2 = p2[:,0]\r\n prob_Class8 = p2[:,1:8]\r\n prob_Class6_1 = T.sum(prob_Class8, axis=1)\r\n prob_Class6 = T.concatenate(\r\n [T.shape_padright(prob_Class6_1),\r\n T.shape_padright(prob_Class6_2)], axis=1)\r\n \r\n # for the following probabilities, we resort to the same strategy in\r\n # the \"top\" option\r\n # class 3\r\n prob_Class3 = SoftMax(lin_output[:,5:7])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class3 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 5\r\n prob_Class5 = SoftMax(lin_output[:,9:13])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class5 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 10\r\n prob_Class10 = SoftMax(lin_output[:,28:31])\r\n # weight these probabilities using the probability of class 4.1\r\n prob_Class10 *= T.shape_padright(prob_Class4[:,0])\r\n \r\n # concatenate all the probabilities into a single tensor variable\r\n self.y_pred = T.concatenate(\r\n [prob_Class1, prob_Class2, prob_Class3, prob_Class4,\r\n prob_Class5, prob_Class6, prob_Class7, prob_Class8,\r\n prob_Class9, prob_Class10, prob_Class11], axis=1)\r\n \r\n \r\n # parameters of the model\r\n self.params = [self.W, self.b]", "def L1U(A, d):\n n = 
shape(A)[0]\n L = eye(n)\n U = matrix(zeros((n,n))); U[0,0] = A[0,0]\n for k in range(1,n):\n km = array([0, k - d]).max()\n if km < k:\n L[k, km:k] = A[k, km:k]\n rforwardsolve(U[km:k, km:k].T, L[k, km:k].T, d) # L\n U[km:(k + 1), k] = A[km:(k + 1), k]\n rforwardsolve(L[km:(k + 1), km:(k + 1)], U[km:(k + 1), k], d) # U\n return L, U", "def _init_parameters(self,diameterInd):\n\n\t\t# topological parameters\n\t\tself.nNodes = 21\n\t\tself._axonNodes=self.nNodes\n\t\tself._paraNodes1=40\n\t\tself._paraNodes2=40\n\t\tself._axonInter=120\n\t\tself._axonTotal=221\n\t\t# morphological parameters\n\t\tpossibleDiameters = [5.7, 7.3, 8.7, 10.0, 11.5, 12.8, 14.0, 15.0, 16.0]\n\t\tself._fiberD=possibleDiameters[diameterInd]\n\t\tself._paraLength1=3 \n\t\tself._nodeLength=1.0\n\t\tself._spaceP1=0.002 \n\t\tself._spaceP2=0.004\n\t\tself._spaceI=0.004\n\t\t# electrical parameters\n\t\tself._rhoa=0.7e6 #Ohm-um\n\t\tself._mycm=0.1 #uF/cm2/lamella membrane\n\t\tself._mygm=0.001 #S/cm2/lamella membrane\n\n\t\tif self._fiberD==5.7:\n\t\t\tself._g=0.605 \n\t\t\tself._axonD=3.4\n\t\t\tself._nodeD=1.9\n\t\t\tself._paraD1=1.9\n\t\t\tself._paraD2=3.4\n\t\t\tself._deltax=500\n\t\t\tself._paraLength2=35\n\t\t\tself._nl=80\n\t\tif self._fiberD==8.7:\n\t\t\tself._g=0.661\n\t\t\tself._axonD=5.8\n\t\t\tself._nodeD=2.8\n\t\t\tself._paraD1=2.8\n\t\t\tself._paraD2=5.8\n\t\t\tself._deltax=1000\n\t\t\tself._paraLength2=40\n\t\t\tself._nl=110\n\t\tif self._fiberD==10.0:\n\t\t\tself._g=0.690\n\t\t\tself._axonD=6.9\n\t\t\tself._nodeD=3.3\n\t\t\tself._paraD1=3.3\n\t\t\tself._paraD2=6.9\n\t\t\tself._deltax=1150\n\t\t\tself._paraLength2=46\n\t\t\tself._nl=120\n\t\tif self._fiberD==11.5:\n\t\t\tself._g=0.700\n\t\t\tself._axonD=8.1\n\t\t\tself._nodeD=3.7\n\t\t\tself._paraD1=3.7\n\t\t\tself._paraD2=8.1\n\t\t\tself._deltax=1250\n\t\t\tself._paraLength2=50\n\t\t\tself._nl=130\n\t\tif self._fiberD==12.8:\n\t\t\tself._g=0.719\n\t\t\tself._axonD=9.2\n\t\t\tself._nodeD=4.2\n\t\t\tself._paraD1=4.2\n\t\t\tself._paraD2=9.2\n\t\t\tself._deltax=1350\n\t\t\tself._paraLength2=54\n\t\t\tself._nl=135\n\t\tif self._fiberD==14.0:\n\t\t\tself._g=0.739\n\t\t\tself._axonD=10.4\n\t\t\tself._nodeD=4.7\n\t\t\tself._paraD1=4.7\n\t\t\tself._paraD2=10.4\n\t\t\tself._deltax=1400\n\t\t\tself._paraLength2=56\n\t\t\tself._nl=140\n\t\tif self._fiberD==15.0:\n\t\t\tself._g=0.767\n\t\t\tself._axonD=11.5\n\t\t\tself._nodeD=5.0\n\t\t\tself._paraD1=5.0\n\t\t\tself._paraD2=11.5\n\t\t\tself._deltax=1450\n\t\t\tself._paraLength2=58\n\t\t\tself._nl=145\n\t\tif self._fiberD==16.0:\n\t\t\tself._g=0.791\n\t\t\tself._axonD=12.7\n\t\t\tself._nodeD=5.5\n\t\t\tself._paraD1=5.5\n\t\t\tself._paraD2=12.7\n\t\t\tself._deltax=1500\n\t\t\tself._paraLength2=60\n\t\t\tself._nl=150\n\n\t\tself._Rpn0=(self._rhoa*.01)/(np.pi*((((self._nodeD/2)+self._spaceP1)**2)-((self._nodeD/2)**2)))\n\t\tself._Rpn1=(self._rhoa*.01)/(np.pi*((((self._paraD1/2)+self._spaceP1)**2)-((self._paraD1/2)**2)))\n\t\tself._Rpn2=(self._rhoa*.01)/(np.pi*((((self._paraD2/2)+self._spaceP2)**2)-((self._paraD2/2)**2)))\n\t\tself._Rpx=(self._rhoa*.01)/(np.pi*((((self._axonD/2)+self._spaceI)**2)-((self._axonD/2)**2)))\n\t\tself._interLength=(self._deltax-self._nodeLength-(2*self._paraLength1)-(2*self._paraLength2))/6", "def _trajectory_to_multi_dof_joint_trajectory(self, p, v, a, j, s, c):\n\n msg = tm.MultiDOFJointTrajectory()\n point = tm.MultiDOFJointTrajectoryPoint()\n msg.points.append(point)\n\n #print(p)\n\n transform = gm.Transform()\n transform.translation.x = p[0]\n transform.translation.y = p[1]\n 
transform.translation.z = p[2]\n quaternion = tft.quaternion_from_euler(0.0, 0.0, p[3])\n transform.rotation.x = quaternion[0]\n transform.rotation.y = quaternion[1]\n transform.rotation.z = quaternion[2]\n transform.rotation.w = quaternion[3]\n point.transforms.append(transform)\n\n velocity = gm.Twist()\n velocity.linear.x = v[0]\n velocity.linear.y = v[1]\n velocity.linear.z = v[2]\n velocity.angular.z = v[3]\n point.velocities.append(velocity)\n\n acceleration = gm.Twist()\n acceleration.linear.x = a[0]\n acceleration.linear.y = a[1]\n acceleration.linear.z = a[2]\n point.accelerations.append(acceleration)\n\n return msg", "def __init__(self, b, d, f, f_hid, is_cuda=True):\n super(RelationalNetwork, self).__init__()\n self.f_hid = f_hid\n\n self.g = GModule(f, f_hid)\n\n self.affine_aggregate = nn.Linear(d * d, 1)\n\n #aself.f_fc1 = nn.Linear(f_hid, f_hid)\n #self.f_fc1_bn = nn.BatchNorm1d(f_hid)\n\n self.coord_oi = torch.FloatTensor(b, 2)\n self.coord_oj = torch.FloatTensor(b, 2)\n if is_cuda:\n self.coord_oi = self.coord_oi.cuda()\n self.coord_oj = self.coord_oj.cuda()\n self.coord_oi = Variable(self.coord_oi)\n self.coord_oj = Variable(self.coord_oj)\n\n # prepare coord tensor\n def cvt_coord(i, d):\n return [( (i+1) / d - d/2) / (d/2), ( (i+1) % d - d/2) / (d/2)]\n\n self.coord_tensor = torch.FloatTensor(b, d**2, 2)\n if is_cuda:\n self.coord_tensor = self.coord_tensor.cuda()\n self.coord_tensor = Variable(self.coord_tensor)\n np_coord_tensor = np.zeros((b, d**2, 2))\n for i in range(d**2):\n np_coord_tensor[:, i, :] = np.array(cvt_coord(i, d))\n self.coord_tensor.data.copy_(torch.from_numpy(np_coord_tensor))\n\n self.fcout = RNOutputModel(f_hid, f) # TODO: argument is number of joints", "def __init__(self, pkl_path, joint_type='cocoplus', dtype=tf.float32):\n # -- Load SMPL params --\n with open(pkl_path, 'r') as f:\n dd = pickle.load(f) \n # Mean template vertices\n self.v_template = tf.Variable(\n undo_chumpy(dd['v_template']),\n name='v_template',\n dtype=dtype,\n trainable=False)\n # Size of mesh [Number of vertices, 3]\n self.size = [self.v_template.shape[0].value, 3]\n self.num_betas = dd['shapedirs'].shape[-1]\n # Shape blend shape basis: 6980 x 3 x 10\n # reshaped to 6980*30 x 10, transposed to 10x6980*3\n shapedir = np.reshape(\n undo_chumpy(dd['shapedirs']), [-1, self.num_betas]).T\n self.shapedirs = tf.Variable(\n shapedir, name='shapedirs', dtype=dtype, trainable=False)\n\n # Regressor for joint locations given shape - 6890 x 24\n self.J_regressor = tf.Variable(\n dd['J_regressor'].T.todense(),\n name=\"J_regressor\",\n dtype=dtype,\n trainable=False)\n\n # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*30 x 207\n num_pose_basis = dd['posedirs'].shape[-1]\n # 207 x 20670\n posedirs = np.reshape(\n undo_chumpy(dd['posedirs']), [-1, num_pose_basis]).T\n self.posedirs = tf.Variable(\n posedirs, name='posedirs', dtype=dtype, trainable=False)\n\n # indices of parents for each joints\n self.parents = dd['kintree_table'][0].astype(np.int32)\n\n # LBS weights\n self.weights = tf.Variable(\n undo_chumpy(dd['weights']),\n name='lbs_weights',\n dtype=dtype,\n trainable=False)\n\n # This returns 19 keypoints: 6890 x 19\n self.joint_regressor = tf.Variable(\n dd['cocoplus_regressor'].T.todense(),\n name=\"cocoplus_regressor\",\n dtype=dtype,\n trainable=False)\n if joint_type == 'lsp': # 14 LSP joints!\n self.joint_regressor = self.joint_regressor[:, :14]\n\n if joint_type not in ['cocoplus', 'lsp']:\n print('BAD!! 
Unknown joint type: %s, it must be either \"cocoplus\" or \"lsp\"' % joint_type)\n import ipdb\n ipdb.set_trace()", "def _predict(self):\n if self.net:\n # divide board\n # this means reverting changes to the env, test_env, and agent.py\n boards = []\n for child in self.children:\n boards.append(child.env.separate_players())\n boards = np.array(boards)\n boards = np.reshape(boards, (-1, boards.shape[1], boards.shape[2]) )\n # assign Q, pi from net\n Q, pi = self.net.model.predict_on_batch(boards)\n for i,child in enumerate(self.children):\n child.Q = Q[i][0]\n child.pi = pi[i]\n if not child.done(): child.pi[child.pi==0] = 1/len(child.env.legal_moves())\n #child.pi = np.random.dirichlet(pi*self.expl + 1) # add noise\n\n # set pi and Q for initial node\n if not self.parent: \n self.pi = np.ones(7) / 7\n self.Q = 0\n\n else:\n Q = 0 # unkown outcome\n pi = np.zeros(7)\n legal_moves = self.env.legal_moves()\n n_moves = len(legal_moves)\n if n_moves: pi[legal_moves] = 1/n_moves\n return Q, np.array(pi)", "def build_joint_state_msg(self):\n \n js_msg = JointState()\n js_msg.header.stamp = rospy.Time.now()\n \n if self.joint_names == []:\n self.joint_names = [\"{}.{}\".format('hand', attr) \n for attr in ORI_ATTRIBUTES] + \\\n [\"{}.{}.{}\".format(finger, bone, ori) \n for finger in FINGER_NAMES \n for bone in FINGER_BONES\n for ori in ORI_ATTRIBUTES]\n LOG.v(\"Publishing JointState for the following joints: {}\".format(self.joint_names), \"start_transmit\")\n \n js_msg.position = [0.0] * len(self.joint_names)\n\n pos = 0\n # Build JointState. First the hand... \n for i, attr in enumerate(ORI_ATTRIBUTES):\n js_msg.name.append('hand.' + str(attr))\n \n # Roll precision hack\n if attr == 'roll':\n vector = self.hand.palm_normal\n else:\n vector = self.hand.direction\n \n js_msg.position[pos] = getattr(vector, attr)\n pos += 1\n\n # ...then the fingers\n for i, finger_name, finger in \\\n [(i, finger_name, self.fingers[finger_name]) \\\n for i, finger_name in enumerate(FINGER_NAMES)]:\n \n # LEAP API v2.0: Skeletal model\n # Get bones\n for j, bone_name, bone in \\\n [(j, bone_name, finger.bone(j)) \\\n for j, bone_name in enumerate(FINGER_BONES)]:\n\n # Fill the joint values one by one\n for k, attr in enumerate(ORI_ATTRIBUTES):\n\n joint_name = \"{}.{}.{}\".format(finger_name, bone_name, attr)\n joint_value = getattr(bone.direction, attr)\n \n js_msg.name.append(joint_name)\n js_msg.position[pos] = joint_value\n pos += 1\n \n # return the JointState message\n return js_msg", "def lhs(d, num_samples=None, prob_distribution=None):\n \n return build_lhs(d, num_samples=num_samples, prob_distribution=prob_distribution)", "def __init__(self, model, line, line_ht, segments = None, influence = None, \r\n connectivity = 1, connectivity_normdist = None,\r\n variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n # Connect this element to the solver\r\n self.model = model\r\n model.elementlist.append(self)\r\n model.linear_solver = True\r\n \r\n # Prepare the stochastic variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # Initialize the head target and connectivity variables\r\n self.line_ht = line_ht\r\n self.connectivity = connectivity\r\n if np.isscalar(self.connectivity): # Connectivity provided is uniform\r\n \r\n self.connectivity_uniform = True\r\n \r\n else: # Connectivity provided \r\n \r\n self.connectivity_uniform = False\r\n \r\n # Check if normalized distances were provided\r\n if connectivity_normdist is 
None:\r\n raise Exception('If connectivity is not uniform, a vector of equal length containing normalized distances (e.g., [0., 0.25, 0.6, 1.]) must be specified.')\r\n \r\n # Check if connectivity_normdist is valid\r\n if np.min(connectivity_normdist) < 0 or np.max(connectivity_normdist) > 1:\r\n raise Exception('connectivity_normdist values must be between 0 and 1. Current values: '+str(connectivity_normdist))\r\n \r\n # Check if connectivity_normdist is sorted\r\n if not (connectivity_normdist == np.sort(connectivity_normdist)).all():\r\n raise Exception('connectivity_normdist values must be provided in ascending order. Current values: '+str(connectivity_normdist))\r\n \r\n self.connectivity_normdist = connectivity_normdist\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n # Complexify the line, if it wasn't already complex\r\n line = self.complexify(line)\r\n \r\n # The subdivision algorith requires the line coordinates as a real N-by-2 matrix\r\n line = np.column_stack((\r\n np.real(line)[:,np.newaxis],\r\n np.imag(line)[:,np.newaxis]))\r\n \r\n # Make a copy of the line\r\n self.line_raw = copy.copy(line)\r\n \r\n # Check if a subdivision has been specified\r\n if segments is None: # No subdivision required\r\n self.segments = line.shape[0]-1\r\n else: # Otherwise, set target\r\n self.segments = segments\r\n \r\n # A number of consistency checks\r\n if self.segments < self.line_raw.shape[0]-1:\r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n if len(line_ht) != line.shape[0]:\r\n raise Exception('Number of head prescriptions must equal number of vertices: '+str(len(line_ht))+' =/= '+str(line.shape[0]))\r\n \r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(np.column_stack((line,self.line_ht)),self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n self.line_ht = copy.copy(self.line[:,2])\r\n \r\n else:\r\n \r\n # Otherwise, reconstruct the line format\r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n self.line_ht = line_ht\r\n \r\n # --------------------------------------------------------------------- \r\n \r\n # Assign the initial strength variables for each segment\r\n self.strength = np.ones(self.segments)\r\n \r\n # Prepare the influence range for this line sink\r\n if influence is None:\r\n # If no influence range is specified, set it to twice the domain radius\r\n # to ensure that no point in the model domain will lie outside this range\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n # Prepare a few variables for this element\r\n self.L = [] # Length of each line segment\r\n self.zc = [] # Center of each line segment\r\n self.head_target = [] # Head target at each line segment\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n self.head_target += [(self.line_ht[seg]+self.line_ht[seg+1])/2]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n self.head_target = np.asarray(self.head_target)\r\n \r\n # Now form a vector of cumulative distances\r\n self.cumdist = []\r\n for seg in range(self.segments):\r\n if seg == 0:\r\n 
self.cumdist.append(np.abs(self.zc[0]-self.line_c[0]))\r\n else:\r\n self.cumdist.append(np.abs(self.zc[seg]-self.zc[seg-1]))\r\n self.cumdist = np.cumsum(np.asarray(self.cumdist))\r\n self.cumdist /= (self.cumdist[-1] + np.abs(self.zc[-1]-self.line_c[-1]))\r\n \r\n if not self.connectivity_uniform:\r\n \r\n # Interpolate the connectivity\r\n from scipy.interpolate import interp1d\r\n itp = interp1d(self.connectivity_normdist,self.connectivity)\r\n self.connectivity_interpolated = itp(self.cumdist)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n # Convert the head targets to potential targets\r\n self.set_potential_target()\r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def __init__(self,dataset: np.ndarray,sampling_Du=1000,prob_au=0.5,label_normal=0,label_anomaly=1, name=\"default\"):\n super().__init__()\n self.name=name\n\n # hyperparameters:\n self.num_S=sampling_Du\n self.normal=label_normal\n self.anomaly=label_anomaly\n self.prob=prob_au\n\n # Dataset infos: D_a and D_u\n self.m,self.n=dataset.shape\n self.n_feature=self.n-1\n self.n_samples=self.m\n self.x=dataset[:,:self.n_feature]\n self.y=dataset[:,self.n_feature]\n self.dataset=dataset\n self.index_u=np.where(self.y==self.normal)[0]\n self.index_a=np.where(self.y==self.anomaly)[0]\n\n # observation space:\n self.observation_space=spaces.Discrete(self.m)\n\n # action space: 0 or 1\n self.action_space=spaces.Discrete(2)\n\n # initial state\n self.counts=None\n self.state=None\n self.DQN=None", "def pheromone_update(self):\n #Ordenar la lista de acuerdo con el tamaño de la lista\n self.sort_paths()\n for i, path in enumerate(self.paths):\n for j, element in enumerate(path):\n for edge in self.map.nodes_array[element[0]][element[1]].edges:\n if (j+1) < len(path):\n if edge['FinalNode'] == path[j+1]:\n edge['Pheromone'] = (1.0 - self.evaporation_factor)*edge['Pheromone'] + \\\n self.pheromone_adding_constant/float(len(path))\n else:\n edge['Pheromone'] = (1.0 - self.evaporation_factor)*edge['Pheromone']", "def log_pseudo_joint(self, data: Tensor, states: Tensor) -> Tensor: # type: ignore\n K = states\n Y = data\n assert K.dtype == to.uint8 and Y.dtype == to.uint8\n pi = self.theta[\"pies\"]\n W = self.theta[\"W\"]\n batch_size, S, H = K.shape\n D = W.shape[0]\n dev = pi.device\n\n logPriors = to.matmul(K.type_as(pi), to.log(pi / (1 - pi)))\n\n logPy = to.empty((batch_size, S), device=dev, dtype=self.precision)\n # We will manually set the lpjs of all-zero states to the appropriate value.\n # For now, transform all-zero states in all-one states, to avoid computation of log(0).\n zeroStatesInd = to.nonzero((K == 0).all(dim=2))\n # https://discuss.pytorch.org/t/use-torch-nonzero-as-index/33218\n zeroStatesInd = (zeroStatesInd[:, 0], zeroStatesInd[:, 1])\n K[zeroStatesInd] = 1\n # prods_nsd = prod{h}{1-W_dh*K_nkh}\n prods = (W * 
K.type_as(W).unsqueeze(2)).neg_().add_(1).prod(dim=-1)\n to.clamp(prods, self.eps, 1 - self.eps, out=prods)\n # logPy_nk = sum{d}{y_nd*log(1/prods_nkd - 1) + log(prods_nkd)}\n f1 = to.log(1.0 / prods - 1.0)\n indeces = 1 - Y[:, None, :].expand(batch_size, S, D)\n # convert to BoolTensor in pytorch>=1.2, leave it as ByteTensor in earlier versions\n indeces = indeces.type_as(to.empty(0) < 0)\n f1[indeces] = 0.0\n logPy[:, :] = to.sum(f1, dim=-1) + to.sum(to.log(prods), dim=2)\n K[zeroStatesInd] = 0\n\n lpj = logPriors + logPy\n # for all-zero states, set lpj to arbitrary very low value if y!=0, 0 otherwise\n # in the end we want exp(lpj(y,s=0)) = 1 if y=0, 0 otherwise\n lpj[zeroStatesInd] = -1e30 * data[zeroStatesInd[0]].any(dim=1).type_as(lpj)\n assert (\n not to.isnan(lpj).any() and not to.isinf(lpj).any()\n ), \"some NoisyOR lpj values are invalid!\"\n return lpj.to(device=states.device) # (N, S)", "def propositional_skeleton(self):\n # Task 9.5\n return Formula.skel_helper(self, {}) # use skel_helper with an empty dict to solve", "def D_coefficients_symmtop(D):\n Dpar=D[0]\n Dperp=D[1]\n D_J=np.zeros(3)\n D_J[0]= 5*Dperp + Dpar\n D_J[1]= 2*Dperp + 4*Dpar\n D_J[2]= 6*Dperp\n return D_J" ]
[ "0.55733985", "0.5363515", "0.53536046", "0.53175926", "0.526763", "0.5229534", "0.52215683", "0.51651067", "0.51620126", "0.5120631", "0.5106703", "0.50803024", "0.5077998", "0.50690013", "0.5067703", "0.50612646", "0.5049136", "0.50420725", "0.5038924", "0.50167084", "0.5012636", "0.50022846", "0.49972445", "0.49907103", "0.49770737", "0.49566028", "0.493718", "0.49273285", "0.49148265", "0.49034655", "0.48959348", "0.48925665", "0.4891792", "0.48890305", "0.48883522", "0.4867239", "0.48628506", "0.48592076", "0.48406032", "0.48370647", "0.48363748", "0.48353505", "0.48318022", "0.48135072", "0.48118874", "0.48114896", "0.48112625", "0.48042417", "0.48019513", "0.47968838", "0.47961125", "0.4790132", "0.47804195", "0.47780964", "0.47687173", "0.4768145", "0.47646108", "0.47627118", "0.47572878", "0.4756846", "0.47516254", "0.4750484", "0.47478852", "0.47468582", "0.4744832", "0.4742735", "0.47400546", "0.47387433", "0.47381803", "0.47360387", "0.4735321", "0.47296497", "0.47259918", "0.47242987", "0.47187796", "0.47085577", "0.4707313", "0.47042647", "0.47012126", "0.4689242", "0.46876025", "0.46822655", "0.4673391", "0.46697515", "0.46680275", "0.46569967", "0.46557078", "0.46519682", "0.4651797", "0.46486023", "0.4647057", "0.46461076", "0.46459636", "0.46449816", "0.46443918", "0.46431145", "0.4632292", "0.46317112", "0.46302196", "0.46289906", "0.46283534" ]
0.0
-1
r"""Anchor model variant from `"Learned Image Compression with Discretized Gaussian Mixture Likelihoods and Attention Modules"
def cheng2020_anchor(quality, metric="mse", pretrained=False, progress=True, **kwargs):
    if metric not in ("mse", "ms-ssim"):
        raise ValueError(f'Invalid metric "{metric}"')
    if quality < 1 or quality > 6:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
    return _load_model(
        "cheng2020-anchor", metric, quality, pretrained, progress, **kwargs
    )
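A minimal usage sketch for the loader above, assuming it ships in the compressai model zoo and returns a torch module; the import path and pretrained-weight availability are assumptions, since the record itself only shows the private _load_model helper:

# Hypothetical usage: load the MSE-optimized anchor model at quality level 3.
# Import path and pretrained weights are assumed, not confirmed by the record.
from compressai.zoo import cheng2020_anchor

net = cheng2020_anchor(quality=3, metric="mse", pretrained=True)
net.eval()  # assumed torch.nn.Module: switch to inference mode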
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def generate_anchors_info():\n original_height, original_width = 512, 640\n input_anchor = Anchor(\n min_level=2,\n max_level=6,\n num_scales=1,\n aspect_ratios=[1.0, 2.0, 0.5],\n anchor_size=8,\n image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))\n anchor_boxes = input_anchor.multilevel_boxes\n for key in anchor_boxes:\n anchor_boxes[key] = anchor_boxes[key].numpy()\n\n scale = min(_IMAGE_SIZE.value / original_height,\n _IMAGE_SIZE.value / original_width)\n image_info = np.array([[[original_height, original_width],\n [_IMAGE_SIZE.value, _IMAGE_SIZE.value],\n [scale, scale], [0, 0]]])\n\n return anchor_boxes, image_info", "def __call__(self, src, label):\r\n \"\"\"color distort\"\"\"\r\n # img = random_color_distort(src)\r\n\r\n # print(\"previous label shape = \", label.shape)\r\n target = np.zeros(shape=(label.shape[0],))\r\n\r\n \"\"\"Pyramid Anchor sampling\"\"\"\r\n img, boxes, 
label = self.random_baiducrop(src, label[:, :4], target)\r\n # print(\"label shape = \", label.shape)\r\n # print('boxes shape =', boxes.shape)\r\n bbox = boxes\r\n # img = mx.nd.array(img)\r\n\r\n \"\"\"color distort\"\"\"\r\n img = mx.nd.array(img)\r\n img = random_color_distort(img)\r\n\r\n # \"\"\"random crop, keep aspect ration=1\"\"\"\r\n # h, w, _ = img.shape\r\n # bbox, crop_size = random_crop_with_constraints(label, (w, h))\r\n # x_offset, y_offset, new_width, new_height = crop_size\r\n # img = mx.image.fixed_crop(img, x_offset, y_offset, new_width, new_height)\r\n\r\n \"\"\"resize with random interpolation\"\"\"\r\n h, w, _ = img.shape\r\n interp = np.random.randint(0, 5)\r\n img = gimage.imresize(img, self._width, self._height, interp=interp)\r\n bbox = gbbox.resize(bbox, (w, h), (self._width, self._height))\r\n\r\n \"\"\"random horizontal flip\"\"\"\r\n h, w, _ = img.shape\r\n img, flips = gimage.random_flip(img, px=0.5)\r\n bbox = gbbox.flip(bbox, (w, h), flip_x=flips[0])\r\n\r\n \"\"\"To Tensor & Normalization\"\"\"\r\n img = mx.nd.image.to_tensor(img)\r\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n\r\n if self._anchors is None:\r\n return img, bbox\r\n\r\n # @TODO: generating training target so cpu workers can help reduce the workload on gpu\r\n face_anchors, head_anchors, body_anchors = self._anchors\r\n gt_bboxes = mx.nd.array(bbox[:, :4]).expand_dims(0)\r\n gt_ids = mx.nd.zeros((1, gt_bboxes.shape[1], 1), dtype=gt_bboxes.dtype)\r\n\r\n face_cls_targets, face_box_targets, _ = self._target_generator(\r\n face_anchors, None, gt_bboxes, gt_ids)\r\n\r\n head_cls_targets, head_box_targets, _ = self._target_generator(\r\n head_anchors, None, gt_bboxes, gt_ids)\r\n\r\n body_cls_targets, body_box_targets, _ = self._target_generator(\r\n body_anchors, None, gt_bboxes, gt_ids)\r\n\r\n return img, \\\r\n face_cls_targets[0], head_cls_targets[0], body_cls_targets[0], \\\r\n face_box_targets[0], head_box_targets[0], body_box_targets[0]", "def main():\n dataset_config = DatasetBuilder.copy_config(\n DatasetBuilder.KITTI_TRAIN)\n dataset_config.num_clusters[0] = 1\n dataset = DatasetBuilder.build_kitti_dataset(dataset_config)\n\n label_cluster_utils = LabelClusterUtils(dataset)\n clusters, _ = label_cluster_utils.get_clusters()\n\n # Options\n img_idx = 1\n # fake_clusters = np.array([[5, 4, 3], [6, 5, 4]])\n # fake_clusters = np.array([[3, 3, 3], [4, 4, 4]])\n\n fake_clusters = np.array([[4, 2, 3]])\n fake_anchor_stride = [5.0, 5.0]\n ground_plane = [0, -1, 0, 1.72]\n\n anchor_3d_generator = grid_anchor_3d_generator.GridAnchor3dGenerator()\n\n area_extents = np.array([[-40, 40], [-5, 5], [0, 70]])\n\n # Generate anchors for cars only\n start_time = time.time()\n anchor_boxes_3d = anchor_3d_generator.generate(\n area_3d=dataset.kitti_utils.area_extents,\n anchor_3d_sizes=fake_clusters,\n anchor_stride=fake_anchor_stride,\n ground_plane=ground_plane)\n all_anchors = box_3d_encoder.box_3d_to_anchor(anchor_boxes_3d)\n end_time = time.time()\n print(\"Anchors generated in {} s\".format(end_time - start_time))\n\n # Project into bev\n bev_boxes, bev_normalized_boxes = \\\n anchor_projector.project_to_bev(all_anchors, area_extents[[0, 2]])\n\n bev_fig, (bev_axes, bev_normalized_axes) = \\\n plt.subplots(1, 2, figsize=(16, 7))\n bev_axes.set_xlim(0, 80)\n bev_axes.set_ylim(70, 0)\n bev_normalized_axes.set_xlim(0, 1.0)\n bev_normalized_axes.set_ylim(1, 0.0)\n\n plt.show(block=False)\n\n for box in bev_boxes:\n box_w = box[2] - box[0]\n box_h = box[3] - box[1]\n\n rect = 
patches.Rectangle((box[0], box[1]),\n box_w, box_h,\n linewidth=2,\n edgecolor='b',\n facecolor='none')\n\n bev_axes.add_patch(rect)\n\n for normalized_box in bev_normalized_boxes:\n box_w = normalized_box[2] - normalized_box[0]\n box_h = normalized_box[3] - normalized_box[1]\n\n rect = patches.Rectangle((normalized_box[0], normalized_box[1]),\n box_w, box_h,\n linewidth=2,\n edgecolor='b',\n facecolor='none')\n\n bev_normalized_axes.add_patch(rect)\n\n rgb_fig, rgb_2d_axes, rgb_3d_axes = \\\n vis_utils.visualization(dataset.rgb_image_dir, img_idx)\n plt.show(block=False)\n\n image_path = dataset.get_rgb_image_path(dataset.sample_names[img_idx])\n image_shape = np.array(Image.open(image_path)).shape\n\n stereo_calib_p2 = calib_utils.read_calibration(dataset.calib_dir,\n img_idx).p2\n\n start_time = time.time()\n rgb_boxes, rgb_normalized_boxes = \\\n anchor_projector.project_to_image_space(all_anchors,\n stereo_calib_p2,\n image_shape)\n end_time = time.time()\n print(\"Anchors projected in {} s\".format(end_time - start_time))\n\n # Read the stereo calibration matrix for visualization\n stereo_calib = calib_utils.read_calibration(dataset.calib_dir, 0)\n p = stereo_calib.p2\n\n # Overlay boxes on images\n anchor_objects = []\n for anchor_idx in range(len(anchor_boxes_3d)):\n anchor_box_3d = anchor_boxes_3d[anchor_idx]\n\n obj_label = box_3d_encoder.box_3d_to_object_label(anchor_box_3d)\n\n # Append to a list for visualization in VTK later\n anchor_objects.append(obj_label)\n\n # Draw 3D boxes\n vis_utils.draw_box_3d(rgb_3d_axes, obj_label, p)\n\n # Draw 2D boxes\n rgb_box_2d = rgb_boxes[anchor_idx]\n\n box_x1 = rgb_box_2d[0]\n box_y1 = rgb_box_2d[1]\n box_w = rgb_box_2d[2] - box_x1\n box_h = rgb_box_2d[3] - box_y1\n\n rect = patches.Rectangle((box_x1, box_y1),\n box_w, box_h,\n linewidth=2,\n edgecolor='b',\n facecolor='none')\n\n rgb_2d_axes.add_patch(rect)\n\n if anchor_idx % 32 == 0:\n rgb_fig.canvas.draw()\n\n plt.show(block=False)\n\n # Create VtkGroundPlane for ground plane visualization\n vtk_ground_plane = VtkGroundPlane()\n vtk_ground_plane.set_plane(ground_plane, area_extents[[0, 2]])\n\n # Create VtkAxes\n axes = vtk.vtkAxesActor()\n axes.SetTotalLength(5, 5, 5)\n\n # Create VtkBoxes for boxes\n vtk_boxes = VtkBoxes()\n vtk_boxes.set_objects(anchor_objects, vtk_boxes.COLOUR_SCHEME_KITTI)\n\n # Create Voxel Grid Renderer in bottom half\n vtk_renderer = vtk.vtkRenderer()\n vtk_renderer.AddActor(vtk_boxes.vtk_actor)\n vtk_renderer.AddActor(vtk_ground_plane.vtk_actor)\n vtk_renderer.AddActor(axes)\n vtk_renderer.SetBackground(0.2, 0.3, 0.4)\n\n # Setup Camera\n current_cam = vtk_renderer.GetActiveCamera()\n current_cam.Pitch(170.0)\n current_cam.Roll(180.0)\n\n # Zooms out to fit all points on screen\n vtk_renderer.ResetCamera()\n\n # Zoom in slightly\n current_cam.Zoom(2.5)\n\n # Reset the clipping range to show all points\n vtk_renderer.ResetCameraClippingRange()\n\n # Setup Render Window\n vtk_render_window = vtk.vtkRenderWindow()\n vtk_render_window.SetWindowName(\"Anchors\")\n vtk_render_window.SetSize(900, 500)\n vtk_render_window.AddRenderer(vtk_renderer)\n\n # Setup custom interactor style, which handles mouse and key events\n vtk_render_window_interactor = vtk.vtkRenderWindowInteractor()\n vtk_render_window_interactor.SetRenderWindow(vtk_render_window)\n\n vtk_render_window_interactor.SetInteractorStyle(\n vtk.vtkInteractorStyleTrackballCamera())\n\n # Render in VTK\n vtk_render_window.Render()\n vtk_render_window_interactor.Start() # Blocking\n # 
vtk_render_window_interactor.Initialize() # Non-Blocking", "def generate_anchors(self):\n self.anchors = np.zeros((self.anchor_num, 4), dtype=np.float32)\n size = self.stride * self.stride\n count = 0\n for r in self.ratios:\n ws = int(math.sqrt(size * 1. / r))\n hs = int(ws * r)\n\n for s in self.scales:\n w = ws * s\n h = hs * s\n self.anchors[count][:] = [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5][:]\n count += 1", "def box_predictor(num_anchors):\n return nn.Conv2D(num_anchors * 4, 3, padding=1)", "def _generate_anchors(base_size, scales, aspect_ratios):\n anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 0.5\n anchors = _ratio_enum(anchor, aspect_ratios)\n anchors = np.vstack(\n [_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]\n )\n return torch.from_numpy(anchors)", "def anchor_matching_nms(self, anchors, targets, box_cls):\n gt_classes = []\n gt_anchors_deltas = []\n keep_nms_list = []\n anchors = Boxes.cat(anchors) # Rx4\n\n box_cls = [permute_to_N_HWA_K(x, self.num_classes) for x in box_cls]\n\n for img_idx, targets_per_image in enumerate(targets):\n match_quality_matrix = pairwise_iou(targets_per_image.gt_boxes, anchors)\n gt_matched_idxs, anchor_labels = self.matcher(match_quality_matrix)\n\n box_cls_per_image = [box_cls_per_level[img_idx] for box_cls_per_level in box_cls]\n box_cls_per_image = torch.cat(box_cls_per_image, dim=0)\n keep_nms = torch.zeros_like(box_cls_per_image).sum(dim=1)\n has_gt = len(targets_per_image) > 0\n if has_gt:\n # ground truth box regression\n matched_gt_boxes = targets_per_image.gt_boxes[gt_matched_idxs]\n gt_anchors_reg_deltas_i = self.box2box_transform.get_deltas(\n anchors.tensor, matched_gt_boxes.tensor\n )\n\n gt_classes_i = targets_per_image.gt_classes[gt_matched_idxs]\n # Anchors with label 0 are treated as background.\n gt_classes_i[anchor_labels == 0] = self.num_classes\n # Anchors with label -1 are ignored.\n gt_classes_i[anchor_labels == -1] = -1\n\n for instance_idxs in range(len(targets_per_image.gt_classes)):\n valid_idx = ((gt_matched_idxs == instance_idxs) & (anchor_labels == 1))\n if len(box_cls_per_image[valid_idx, gt_classes_i[valid_idx]]) == 0:\n continue\n max_id = torch.argmax(box_cls_per_image[valid_idx, gt_classes_i[valid_idx]])\n keep_id = torch.where(valid_idx)[0]\n keep_id = keep_id[max_id]\n keep_nms[keep_id] = 1\n keep_nms = (keep_nms == 1)\n else:\n gt_classes_i = torch.zeros_like(gt_matched_idxs) + self.num_classes\n gt_anchors_reg_deltas_i = torch.zeros_like(anchors.tensor)\n\n gt_classes.append(gt_classes_i)\n gt_anchors_deltas.append(gt_anchors_reg_deltas_i)\n keep_nms_list.append(keep_nms)\n\n return torch.stack(gt_classes), torch.stack(gt_anchors_deltas), torch.stack(keep_nms_list)", "def __create_anchors(self, sizes, aspects):\n k = len(sizes) * len(aspects)\n img_anchors = []\n for i in sizes:\n for j in aspects:\n img_anchors.append(\n [0, 0, 2 * i * j[0] / (j[0] + j[1]), 2 * i * j[1] / (j[0] + j[1])])\n\n self.anchors = np.asarray(img_anchors)", "def __build_anchors(anchor_parameters, features):\n anchors = [\n layers.Anchors(\n size=anchor_parameters.sizes[i],\n stride=anchor_parameters.strides[i],\n ratios=anchor_parameters.ratios,\n scales=anchor_parameters.scales,\n name='anchors_{}'.format(i)\n )(f) for i, f in enumerate(features)\n ]\n\n return keras.layers.Concatenate(axis=1, name='anchors')(anchors)", "def transform(self, src):\n T, feature_dim = src.shape[0], self.Y_static_dim*3\n\n if feature_dim == self.Y_static_dim:\n return super(GMM_M, self).transform(src)\n\n # A 
suboptimum mixture sequence (eq.37)\n optimum_mix = self.px.predict(src)\n\n # Compute E eq.(40)\n E = np.empty((T, feature_dim))\n for t in range(T):\n m = optimum_mix[t] # estimated mixture index at time t\n xx = np.linalg.solve(self.covarXX[m], src[t] - self.src_means[m])\n #print(xx.shape,self.tgt_means[m].shape,self.covarYX[m].shape)\n # Eq. (22)\n E[t] = self.tgt_means[m] +np.dot(self.covarYX[m], xx)\n\n # Compute D eq.(23)\n # Approximated variances with diagonals so that we can do MLPG\n # efficiently in dimention-wise manner\n #print(E.shape)\n D = np.empty((T, feature_dim))\n #print(D.shape)\n for t in range(T):\n m = optimum_mix[t]\n # Eq. (23), with approximating covariances as diagonals\n #D[t] = np.diag(self.covarYY[m]) - np.diag(self.covarYX[m]) / \\\n # np.diag(self.covarXX[m]) * np.diag(self.covarXY[m])\n\n # Exact Inference\n dd = self.covarYY[m] - np.linalg.multi_dot([self.covarYX[m], np.linalg.pinv(self.covarXX[m]), self.covarXY[m]])\n #print(dd.shape)\n D[t] = np.diag(dd)\n\n # Once we have mean and variance over frames, then we can do MLPG\n return E, D, self.windows#mlpg(E, D, self.windows)", "def adjust_anchors(self):\n pass", "def generate_anchors(scales=(32,), aspect_ratios=(0.5, 1, 2), dtype=np.float32):\n scales = np.array(scales)\n aspect_ratios = np.array(aspect_ratios, dtype=dtype)\n h_ratios = np.sqrt(aspect_ratios)\n w_ratios = 1 / h_ratios\n\n ws = (w_ratios[:, None] * scales[None, :]).reshape(-1)\n hs = (h_ratios[:, None] * scales[None, :]).reshape(-1)\n\n base_anchors = np.stack([-ws, -hs, ws, hs], axis=1) / 2\n return base_anchors", "def set_anchors(mc):\n H, W, C = _get_output_shape(mc)\n B = mc.ANCHOR_PER_GRID\n X = np.array(mc.INITIAL_ANCHOR_SHAPES)\n X[:,0] *= mc.IMAGE_WIDTH\n X[:,1] *= mc.IMAGE_HEIGHT\n anchor_shapes = np.reshape( # it refers to the anchor width and height\n [X] * H * W,\n (H, W, B, 2)\n )\n center_x = np.reshape(\n np.transpose(\n np.reshape(\n np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B), \n (B, H, W)\n ),\n (1, 2, 0)\n ),\n (H, W, B, 1)\n )\n center_y = np.reshape(\n np.transpose(\n np.reshape(\n np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),\n (B, W, H)\n ),\n (2, 1, 0)\n ),\n (H, W, B, 1)\n )\n anchors = np.reshape(\n np.concatenate((center_x, center_y, anchor_shapes), axis=3),\n (-1, 4)\n )\n\n return anchors", "def __init__(self, cfg):\r\n\r\n\t\tself.image_size = cfg.MODEL.INPUT.IMAGE_SIZE\r\n\t\tanchor_config = cfg.MODEL.ANCHORS\r\n\t\tself.feature_maps = anchor_config.FEATURE_MAPS\r\n\t\tself.min_sizes = anchor_config.MIN_SIZES\r\n\t\tself.max_sizes = anchor_config.MAX_SIZES \r\n\t\tself.aspect_ratios = anchor_config.ASPECT_RATIOS\r\n\t\tself.clip = anchor_config.CLIP", "def bayesian_model(y, x, data=None, correlation=False, family=\"Normal\", robust = True, samples = 1000, plot_posterior=True, plot_regression=True, plot_samples = \"default\", print_summary=True, alpha = 0.05):\n\n print(\"Starting Bayesian estimation...\")\n from theano import config\n config.warn.sum_div_dimshuffle_bug = False\n import pymc3\n\n\n\n y_name = \"y\"\n x_name = \"x\"\n\n if isinstance(y,str):\n y_name = y\n try:\n y = data[y]\n except:\n pass\n if isinstance(x,str):\n x_name = x\n try:\n x = data[x]\n except:\n pass\n\n\n if correlation == True:\n #Center and scale\n x = x - np.mean(x)\n x = x / np.std(x)\n\n y = y - np.mean(y)\n y = y / np.std(y)\n\n\n\n\n data = {y_name:y,\n x_name:x}\n formula = y_name + ' ~ ' + x_name\n\n\n\n\n\n\n\n\n\n\n with pymc3.Model() as model: # model specifications in PyMC3 
are wrapped in a with-statement\n if family == \"Normal\":\n family = pymc3.glm.families.Normal()\n if robust == True:\n family = pymc3.glm.families.StudentT()\n\n pymc3.glm.glm(formula, data, family=family)\n start = pymc3.find_MAP()\n step = pymc3.NUTS(scaling=start) # Instantiate MCMC sampling algorithm\n trace = pymc3.sample(samples, step, progressbar=True) # draw 2000 posterior samples using NUTS sampling\n\n# trace = trace[int(samples/4):]\n #PLOT POSTERIOR DISTRIBUTION\n if plot_posterior == True:\n pymc3.traceplot(trace)\n\n #PLOT LINES\n if plot_regression == True:\n plot_data= []\n plot_data.append(go.Scatter(x=x,\n y=y,\n mode = 'markers'))\n\n\n\n if plot_samples == \"default\":\n if len(trace) > 100:\n plot_samples = 100\n samples_range = np.random.randint(0, len(trace), plot_samples)\n else:\n plot_samples = samples\n samples_range = range(len(trace))\n else:\n samples_range = np.random.randint(0, len(trace), plot_samples)\n\n\n\n for i in samples_range:\n print(i)\n plot_data.append(go.Scatter(x=x,\n y=trace[i]['Intercept'] + trace[i]['x'] * x,\n mode = 'lines',\n opacity=0.25,\n line = {\"color\":\"grey\",\n \"width\":5}))\n layout = go.Layout(showlegend=False)\n figure = go.Figure(data = plot_data,layout=layout)\n py.plot(figure)\n\n if print_summary == True:\n print(pymc3.summary(trace,alpha=alpha))\n return(trace)", "def generate_homography_nn_adam(self):\n # Create the NN\n self.set_optimizer_adam()\n self.set_callback(utils.lr_callback)\n self.build_model()\n self.compile()", "def process_predictions_and_anchors(self, anchor_list, valid_flag_list,\n cls_scores, bbox_preds, img_metas,\n gt_bboxes_ignore_list):\n num_imgs = len(img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n anchor_list_ = []\n valid_flag_list_ = []\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list_.append(torch.cat(anchor_list[i]))\n valid_flag_list_.append(torch.cat(valid_flag_list[i]))\n\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n\n num_levels = len(cls_scores)\n cls_score_list = []\n bbox_pred_list = []\n\n mlvl_cls_score_list = [\n cls_score.permute(0, 2, 3, 1).reshape(\n num_imgs, -1, self.num_base_priors * self.cls_out_channels)\n for cls_score in cls_scores\n ]\n mlvl_bbox_pred_list = [\n bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n self.num_base_priors * 4)\n for bbox_pred in bbox_preds\n ]\n\n for i in range(num_imgs):\n mlvl_cls_tensor_list = [\n mlvl_cls_score_list[j][i] for j in range(num_levels)\n ]\n mlvl_bbox_tensor_list = [\n mlvl_bbox_pred_list[j][i] for j in range(num_levels)\n ]\n cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0)\n cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0)\n cls_score_list.append(cat_mlvl_cls_score)\n bbox_pred_list.append(cat_mlvl_bbox_pred)\n return (anchor_list_, valid_flag_list_, num_level_anchors_list,\n cls_score_list, bbox_pred_list, gt_bboxes_ignore_list)", "def loss_HardNet(anchor, positive, anchor_swap = False, anchor_ave = False,\n margin = 1.0, batch_reduce = 'min', loss_type = \"triplet_margin\"):\n\n assert anchor.size() == positive.size(), \"Input sizes between positive and negative must be equal.\"\n assert anchor.dim() == 2, \"Inputd 
must be a 2D matrix.\"\n eps = 1e-8\n dist_matrix = distance_matrix_vector(anchor, positive) +eps # D = A_t*P\n eye = torch.autograd.Variable(torch.eye(dist_matrix.size(1))).cuda()\n\n # steps to filter out same patches that occur in distance matrix as negatives\n pos1 = torch.diag(dist_matrix)\n dist_without_min_on_diag = dist_matrix+eye*10\n\n # get all the indices which value<0.008\n mask = (dist_without_min_on_diag.ge(0.008).float()-1.0)*(-1)\n mask = mask.type_as(dist_without_min_on_diag)*10\n dist_without_min_on_diag = dist_without_min_on_diag+mask\n\n # sampling strategy of Hardest in batch\n if batch_reduce == 'min':\n # mining the value < 0.008(without mining on the diagonal)\n min_neg = torch.min(dist_without_min_on_diag, 1)[0]\n if anchor_swap:\n min_neg2 = torch.min(dist_without_min_on_diag, 0)[0]\n min_neg = torch.min(min_neg, min_neg2)\n\n \"\"\" print for debug\n dist_matrix_a = distance_matrix_vector(anchor, anchor)+ eps\n dist_matrix_p = distance_matrix_vector(positive,positive)+eps\n dist_without_min_on_diag_a = dist_matrix_a+eye*10\n dist_without_min_on_diag_p = dist_matrix_p+eye*10\n min_neg_a = torch.min(dist_without_min_on_diag_a,1)[0]\n min_neg_p = torch.t(torch.min(dist_without_min_on_diag_p,0)[0])\n min_neg_3 = torch.min(min_neg_p,min_neg_a)\n min_neg = torch.min(min_neg,min_neg_3)\n print (min_neg_a)\n print (min_neg_p)\n print (min_neg_3)\n print (min_neg)\n \"\"\"\n min_neg = min_neg\n pos = pos1\n elif batch_reduce == 'average':\n # why repeat pos value here?\n pos = pos1.repeat(anchor.size(0)).view(-1,1).squeeze(0)\n min_neg = dist_without_min_on_diag.view(-1,1)\n if anchor_swap:\n min_neg2 = torch.t(dist_without_min_on_diag).contiguous().view(-1,1)\n # compare anchor-pos vs. pos-anchor value\n min_neg = torch.min(min_neg, min_neg2)\n min_neg = min_neg.squeeze(0)\n elif batch_reduce == 'random':\n idxs = torch.autograd.Variable(torch.randperm(anchor.size()[0]).long()).cuda()\n min_neg = dist_without_min_on_diag.gather(1, idxs.view(-1,1))# dim=1, col-idx\n if anchor_swap:\n min_neg2 = torch.t(dist_without_min_on_diag).gather(1,idxs.view(-1,1)) \n min_neg = torch.min(min_neg, min_neg2)\n min_neg = torch.t(min_neg).squeeze(0)\n pos = pos1\n else: \n print ('Unknown batch reduce mode. Try min, average or random')\n sys.exit(1)\n\n # calculate the loss depends on the loss_type\n if loss_type == \"triplet_margin\":\n loss = torch.clamp(margin + pos - min_neg, min=0.0)\n elif loss_type == 'softmax':\n # Softmin used here: (-x) instead of x as the input\n # log-likelihood cost function instead of cross-entropy cost function\n exp_pos = torch.exp(2.0 - pos)\n exp_den = exp_pos + torch.exp(2.0 - min_neg) + eps\n loss = - torch.log( exp_pos / exp_den )\n elif loss_type == 'contrastive':\n loss = torch.clamp(margin - min_neg, min=0.0) + pos\n else: \n print ('Unknown loss type. 
Try triplet_margin, softmax or contrastive')\n sys.exit(1)\n\n loss = torch.mean(loss)\n return loss", "def get_atss_targets(\n self,\n anchor_list,\n valid_flag_list,\n gt_bboxes_list,\n img_metas,\n gt_bboxes_ignore_list=None,\n gt_labels_list=None,\n label_channels=1,\n unmap_outputs=True,\n ):\n num_imgs = len(img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list[i] = torch.cat(anchor_list[i])\n valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n if gt_labels_list is None:\n gt_labels_list = [None for _ in range(num_imgs)]\n (\n all_anchors,\n all_labels,\n all_label_weights,\n all_bbox_targets,\n all_bbox_weights,\n pos_inds_list,\n neg_inds_list,\n ) = multi_apply(\n self._get_target_single,\n anchor_list,\n valid_flag_list,\n num_level_anchors_list,\n gt_bboxes_list,\n gt_bboxes_ignore_list,\n gt_labels_list,\n img_metas,\n label_channels=label_channels,\n unmap_outputs=unmap_outputs,\n )\n # no valid anchors\n if not all(labels is not None for labels in all_labels):\n return None\n # sampled anchors of all images\n num_total_pos = sum(max(inds.numel(), 1) for inds in pos_inds_list)\n num_total_neg = sum(max(inds.numel(), 1) for inds in neg_inds_list)\n # split targets to a list w.r.t. multiple levels\n anchors_list = images_to_levels(all_anchors, num_level_anchors)\n labels_list = images_to_levels(all_labels, num_level_anchors)\n valid_label_mask = self.get_valid_label_mask(img_metas=img_metas, all_labels=all_labels)\n valid_label_mask = [i.to(anchor_list[0].device) for i in valid_label_mask]\n if len(valid_label_mask) > 0:\n valid_label_mask = images_to_levels(valid_label_mask, num_level_anchors)\n\n label_weights_list = images_to_levels(all_label_weights, num_level_anchors)\n bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors)\n bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors)\n return (\n anchors_list,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n bbox_weights_list,\n valid_label_mask,\n num_total_pos,\n num_total_neg,\n )", "def class_average_withali(images,ptcl_info,xform,ref,averager=(\"mean\",{}),normproc=(\"normalize.edgemean\",{}),setsfref=0,verbose=0):\n\n\tif isinstance(images[0],EMData) : nimg=len(images)\n\telif isinstance(images[0],str) and isinstance(images[1],int) : nimg=len(images)-1\n\telse : raise Exception,\"Bad images list\"\n\n\tincl=[]\n\texcl=[]\n#\txforms=[]\n\tavgr=Averagers.get(averager[0], averager[1])\n\tfor i in range(nimg):\n\t\timg=get_image(images,i,normproc)\n\t\tptcl_info[i]=(ptcl_info[i][0],xform*ptcl_info[i][1],ptcl_info[i][2])\t\t# apply the new Transform to the existing one\n#\t\tptcl_info[i]=(ptcl_info[i][0],ptcl_info[i][1]*xform,ptcl_info[i][2])\t\t# apply the new Transform to the existing one\n\t\timg.process_inplace(\"xform\",{\"transform\":ptcl_info[i][1]})\n\t\ttry: use=ptcl_info[i][2]\n\t\texcept: use=1\n\t\tif use :\n\t\t\tavgr.add_image(img)\t\t\t\t# only include the particle if we've tagged it as good\n\t\t\tif img.has_attr(\"source_n\") : 
incl.append(img[\"source_n\"])\n#\t\t\txforms.append(ptcl_info[i][1])\n\t\telif img.has_attr(\"source_n\") : excl.append(img[\"source_n\"])\n\n\tavg=avgr.finish()\n\n\t# normalize to the reference, this should make make3dpar work better as we can skip the normalization step\n\tif ref!=None :\n\t\tif setsfref:\n\t\t\tavg.process_inplace(\"filter.matchto\",{\"to\":ref,\"interpolate\":0,\"keephires\":1})\n\t\t\tavg-=avg.get_edge_mean()\n\t\telse : avg.process_inplace(\"normalize.toimage\",{\"to\":ref})\n\n\t\tavg[\"class_qual\"]=avg.cmp(\"ccc\",ref)\n\n\t# set some useful attributes\n\tif len(incl)>0 or len(excl)>0 :\n\t\tif len(incl)>0 : avg[\"class_ptcl_idxs\"]=incl\n\t\tif len(excl)>0 : avg[\"exc_class_ptcl_idxs\"]=excl\n#\t\tif len(xforms)>0: avg[\"class_ptcl_xforms\"]=xforms\n\t\tavg[\"class_ptcl_src\"]=img[\"source_path\"]\n\n\treturn avg", "def plot(model, pos=None, scale=1, figsize=(15, 8), interactive=False, title='bnlearn causal network', params = {'directed':True, 'height':'800px', 'width':'70%', 'notebook':False, 'layout':None, 'font_color': False, 'bgcolor':'#ffffff'}, verbose=3):\n defaults = {'directed':True, 'height':'800px', 'width':'70%', 'notebook':False, 'heading':title, 'layout':None, 'font_color': False, 'bgcolor':'#ffffff'}\n params = {**defaults, **params}\n\n out = {}\n G = nx.DiGraph() # Directed graph\n layout='fruchterman_reingold'\n\n # Extract model if in dict\n if 'dict' in str(type(model)):\n adjmat = model.get('adjmat', None)\n model = model.get('model', None)\n\n # Bayesian model\n if 'BayesianModel' in str(type(model)) or 'pgmpy' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on BayesianModel')\n # positions for all nodes\n pos = bnlearn.network.graphlayout(model, pos=pos, scale=scale, layout=layout, verbose=verbose)\n # Add directed edge with weigth\n # edges=model.edges()\n edges=[*model.edges()]\n for i in range(len(edges)):\n G.add_edge(edges[i][0], edges[i][1], weight=1, color='k')\n elif 'networkx' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on networkx model')\n G = model\n pos = bnlearn.network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n else:\n if verbose>=3: print('[bnlearn] >Plot based on adjacency matrix')\n G = bnlearn.network.adjmat2graph(adjmat)\n # Get positions\n pos = bnlearn.network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n\n # Make interactive or static plot\n if interactive:\n try:\n from pyvis import network as net\n from IPython.core.display import display, HTML\n # Convert adjacency matrix into Networkx Graph\n G = bnlearn.network.adjmat2graph(adjmat)\n # Setup of the interactive network figure\n g = net.Network(**params)\n # g = net.Network(directed=True, height='800px', width='70%', notebook=False, heading=title)\n g.from_nx(G)\n # Create advanced buttons\n g.show_buttons(filter_=['physics'])\n # Display\n filename = title.strip().replace(' ','_') + '.html'\n g.show(filename)\n display(HTML(filename))\n # webbrowser.open('bnlearn.html')\n except ModuleNotFoundError:\n if verbose>=2: print('[bnlearn] >\"pyvis\" module is not installed. 
Please pip install first: \"pip install pyvis\"')\n else:\n # Bootup figure\n plt.figure(figsize=figsize)\n # nodes\n nx.draw_networkx_nodes(G, pos, node_size=500, alpha=0.85)\n # edges\n colors = [G[u][v].get('color', 'k') for u, v in G.edges()]\n weights = [G[u][v].get('weight', 1) for u, v in G.edges()]\n nx.draw_networkx_edges(G, pos, arrowstyle='->', edge_color=colors, width=weights)\n # Labels\n nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')\n # Get labels of weights\n # labels = nx.get_edge_attributes(G,'weight')\n # Plot weights\n nx.draw_networkx_edge_labels(G, pos, edge_labels=nx.get_edge_attributes(G, 'weight'))\n # Making figure nice\n ax = plt.gca()\n ax.set_axis_off()\n plt.show()\n\n # Store\n out['pos']=pos\n out['G']=G\n return(out)", "def get_augmentation_sequence():\n # Macro to apply something with 50% chance\n sometimes = lambda aug: iaa.Sometimes(0.5, aug) # 50%\n rarely = lambda aug: iaa.Sometimes(0.1, aug) # 10%\n\n # Augmentation applied to every image\n # Augmentors sampled one value per channel\n aug_sequence = iaa.Sequential(\n [\n # apply the following augmenters to most images\n iaa.Fliplr(0.5), # horizontally flip 50% of all images\n iaa.Flipud(0.5), # vertically flip 50% of all images\n\n # crop images by -0.25% to 0.25% of their height/width\n # positive values crop the image, negative pad\n sometimes(iaa.CropAndPad(\n percent=(-0.25, 0.25),\n pad_mode=['constant', 'edge'], # pad with constant value of the edge value\n pad_cval=(0, 0) # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n )),\n sometimes(iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)\n rotate=(-45, 45), # rotate by -45 to +45 degrees\n shear=(-16, 16), # shear by -16 to +16 degrees\n order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)\n cval=(0, 0), # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n mode='constant' # ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)\n )),\n # rarely(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),\n iaa.GaussianBlur((0, 3.0)),\n iaa.Add((-10, 10), per_channel=0.7), # change brightness of images (by -10 to 10 of original value)\n iaa.AddToHueAndSaturation((-20, 20)),\n # sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))\n ],\n random_order=True\n )\n\n return aug_sequence", "def forward(self, image, dtype=torch.float32):\n image_shape = image.shape[2:]\n\n if image_shape == self.last_shape and image.device in self.last_anchors:\n return self.last_anchors[image.device]\n\n if self.last_shape is None or self.last_shape != image_shape:\n self.last_shape = image_shape\n\n if dtype == torch.float16:\n dtype = np.float16\n else:\n dtype = np.float32\n\n boxes_all = []\n for stride in self.strides:\n boxes_level = []\n for scale, ratio in itertools.product(self.scales, self.ratios):\n if image_shape[1] % stride != 0:\n raise ValueError('input size must be divided by the stride.')\n base_anchor_size = self.anchor_scale * stride * scale\n anchor_size_x_2 = base_anchor_size * ratio[0] / 2.0\n anchor_size_y_2 = base_anchor_size * ratio[1] / 2.0\n\n x = np.arange(stride / 2, image_shape[1], stride)\n y = np.arange(stride / 2, image_shape[0], stride)\n xv, yv = np.meshgrid(x, y)\n xv = xv.reshape(-1)\n yv = 
yv.reshape(-1)\n\n # y1,x1,y2,x2\n boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,\n yv + anchor_size_y_2, xv + anchor_size_x_2))\n boxes = np.swapaxes(boxes, 0, 1)\n boxes_level.append(np.expand_dims(boxes, axis=1))\n # concat anchors on the same level to the reshape NxAx4\n boxes_level = np.concatenate(boxes_level, axis=1)\n boxes_all.append(boxes_level.reshape([-1, 4]))\n\n anchor_boxes = np.vstack(boxes_all)\n\n anchor_boxes = torch.from_numpy(anchor_boxes.astype(dtype)).to(image.device)\n anchor_boxes = anchor_boxes.unsqueeze(0)\n\n # save it for later use to reduce overhead\n self.last_anchors[image.device] = anchor_boxes\n return anchor_boxes", "def auto_set_anchors(self, kpt=(0.0, 0.0, 0.0)):\n ik = self.find_k(kpt)\n psi = self.get_psi_k(ik)[:, :] * self.occ[ik][None, :]\n psi_Dagger = psi.T.conj()\n self.cols = scdm(psi_Dagger, self.nwann)\n if self.sort_cols:\n self.cols = np.sort(self.cols)\n print(f\"The eigenvalues at anchor k: {self.get_eval_k(ik)}\")\n print(f\"anchor_kpt={kpt}. Selected columns: {self.cols}.\")", "def get_augmenter():\n\n augmenter = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Small gaussian blur with random sigma between 0 and 0.5.\n # But we only blur about 50% of all images.\n iaa.Sometimes(\n 0.5,\n iaa.GaussianBlur(sigma=(0, 0.5))\n ),\n # Strengthen or weaken the contrast in each image.\n iaa.LinearContrast((0.75, 1.5)),\n # Add gaussian noise.\n # For 50% of all images, we sample the noise once per pixel.\n # For the other 50% of all images, we sample the noise per pixel AND\n # channel. This can change the color (not only brightness) of the\n # pixels.\n iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),\n # Make some images brighter and some darker.\n # In 20% of all cases, we sample the multiplier once per channel,\n # which can end up changing the color of the images.\n iaa.Multiply((0.8, 1.2), per_channel=0.2),\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, rotate them and shear them.\n iaa.Affine(\n scale={\"x\": (0.80, 1.2), \"y\": (0.80, 1.2)},\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n rotate=(-25, 25),\n shear=(-6, 6)\n )\n], random_order=True) # apply augmenters in random order\n\n return augmenter", "def label_anchors(anchors, anchor_is_untruncated, gt_classes, gt_bboxes, background_id, iou_low_threshold=0.41, iou_high_threshold=0.61):\n n = anchors.shape[0]\n k = gt_bboxes.shape[0]\n \n # Compute the IoUs of the anchors and ground truth boxes\n tiled_anchors = np.tile(np.expand_dims(anchors, 1), (1, k, 1))\n tiled_gt_bboxes = np.tile(np.expand_dims(gt_bboxes, 0), (n, 1, 1))\n\n tiled_anchors = tiled_anchors.reshape((-1, 4))\n tiled_gt_bboxes = tiled_gt_bboxes.reshape((-1, 4))\n\n ious, ioas, iogs = iou_bbox(tiled_anchors, tiled_gt_bboxes)\n ious = ious.reshape(n, k)\n ioas = ioas.reshape(n, k)\n iogs = iogs.reshape(n, k)\n\n # Label each anchor based on its max IoU\n max_ious = np.max(ious, axis=1)\n max_ioas = np.max(ioas, axis=1)\n max_iogs = np.max(iogs, axis=1)\n \n best_gt_bbox_ids = np.argmax(ious, axis=1)\n\n labels = -np.ones((n), np.int32)\n positive_idx = np.where(max_ious >= iou_high_threshold)[0]\n negative_idx = np.where(max_ious < iou_low_threshold)[0]\n labels[positive_idx] = 1\n labels[negative_idx] = 0\n \n # Truncated anchors are always ambiguous\n ignore_idx = np.where(anchor_is_untruncated==0)[0]\n labels[ignore_idx] = -1\n\n bboxes = 
gt_bboxes[best_gt_bbox_ids]\n\n classes = gt_classes[best_gt_bbox_ids]\n classes[np.where(labels<1)[0]] = background_id\n\n max_ious[np.where(anchor_is_untruncated==0)[0]] = -1\n max_ioas[np.where(anchor_is_untruncated==0)[0]] = -1\n max_iogs[np.where(anchor_is_untruncated==0)[0]] = -1\n\n return labels, bboxes, classes, max_ious, max_ioas, max_iogs", "def explain(self):\n # build the 2 versions of the model\n model = self.build_model()\n last_conv_model = self.build_cut_model()\n\n for i, label_name in enumerate(self.label_names):\n # This is the algorithm for the last convolution layer's tensor image\n # Get the index of the image that was classified correctly with the most confidence for the class\n predicted_col_proba = np.array(self.predicted_labels)[0][:, i]\n predicted_col_argsort = predicted_col_proba.argsort()[::-1]\n predicted_col = (predicted_col_proba > 0.2).astype(int)\n true_col = self.true_labels[:, 0]\n\n representative_image_index = None\n for most_probable_arg_index in predicted_col_argsort:\n if predicted_col[most_probable_arg_index] == true_col[most_probable_arg_index]:\n representative_image_index = most_probable_arg_index\n break\n\n # Resize the image to fit the neural network and keep the original resized image\n original_img = io.imread('{}/{}/{}'.format(path_to_img_directory, self.ex_format, np.array(self.image_names)[representative_image_index]))\n original_img = cv2.normalize(original_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n original_img = cv2.resize(original_img, dsize=(self.ex_input_size, self.ex_input_size), interpolation=cv2.INTER_CUBIC)\n img = np.expand_dims(original_img, axis=0)\n original_img = original_img[:, :, :3]\n\n # Get the output of the neural network for this image as a tensor\n model.predict(np.array(img))\n class_output = model.output[:, i]\n last_conv_layer = model.get_layer(self.ex_last_conv_layer_name1).output\n # if self.model_name == 'vit':\n # last_conv_layer = tf.nn.relu(tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024)))\n\n # Get the output for the cut model\n cut_img = last_conv_model.predict(np.array(img))[0]\n if self.model_name == 'vit':\n cut_img = np.reshape(cut_img[:256, :], (16, 16, 1024))\n cut_img = np.mean(cut_img, axis=-1)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if self.model_name == 'vit':\n cut_img[0, 0] = np.mean(cut_img)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n cut_img = cv2.resize(cut_img, (self.ex_input_size, self.ex_input_size))\n\n # This is the algorithm of the Grad-CAM model\n # Refine the output of the last convolutional layer according to the class output\n grads = K.gradients(class_output, last_conv_layer)[0]\n if self.model_name == 'vit':\n last_conv_layer = tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024))\n last_conv_layer = last_conv_layer / tf.norm(last_conv_layer)\n\n grads = tf.reshape(grads[:, :256, :], (-1, 16, 16, 1024))\n grads = grads / tf.norm(grads)\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n iterate = K.function([model.input], [pooled_grads, last_conv_layer[0]])\n pooled_grads_value, conv_layer_output_value = iterate([img])\n for j in range(self.ex_last_conv_layer_filter_number):\n conv_layer_output_value[:, :, j] *= pooled_grads_value[j]\n\n # Create a 16x16 heatmap and scale it to the same size as the original image\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n 
heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (self.ex_input_size, self.ex_input_size))\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = cv2.normalize(heatmap, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n superimposed_img = cv2.addWeighted(original_img, 0.7, heatmap, 0.4, 0)\n\n # save the original image\n plt.matshow(original_img)\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'original', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the cut image\n plt.matshow(cut_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'cut', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the superimposed gradcam image\n plt.matshow(superimposed_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'gradcam', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)", "def generate_anchors(base_size=16, feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):\n anchors = generate_base_anchors(base_size=base_size, ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))\n A = anchors.shape[0]\n shift_x = np.arange(0, IM_SCALE // feat_stride) * feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_x)\n shifts = np.stack([shift_x, shift_y, shift_x, shift_y], -1)\n all_anchors = shifts[:, :, None] + anchors[None, None]\n return all_anchors", "def main():\n # args\n in_file = sys.argv[1]\n plot_prefix = sys.argv[2]\n\n # score key\n orig_impts_key = \"sequence-weighted\"\n match_impts_key = \"sequence-weighted.active\"\n\n # other\n LEFT_CLIP = 420\n RIGHT_CLIP = 580\n FINAL_LEFT_CLIP = 20\n FINAL_RIGHT_CLIP = 120\n \n # read in (sorted) file\n vignettes = pd.read_csv(in_file, sep=\"\\t\")\n\n #for i in range(vignettes.shape[0]):\n for i in range(400, 420):\n vignette = vignettes.iloc[i]\n file_name = vignette[\"file\"]\n example_idx = vignette[\"file_idx\"]\n\n # pull data and clip to match\n with h5py.File(file_name, \"r\") as hf:\n metadata = hf[DataKeys.SEQ_METADATA][example_idx,0]\n data = hf[DataKeys.WEIGHTED_SEQ][example_idx][:,LEFT_CLIP:RIGHT_CLIP]\n sig = hf[DataKeys.WEIGHTED_SEQ_ACTIVE][example_idx]\n assert metadata == vignette[\"metadata\"]\n\n region_id = metadata.split(\";\")[1].split(\"=\")[1].replace(\":\", \"_\")\n vignette_prefix = \"{}.{}.{}\".format(plot_prefix, region_id, example_idx)\n \n # normalize\n adjusted_scores = -scale_scores(data, sig)\n\n # plot full active region\n out_file = \"{}.impts.pdf\".format(vignette_prefix)\n plot_weights_group(adjusted_scores, out_file, sig_array=sig)\n\n # clip again and plot\n out_file = \"{}.impts.clipped.pdf\".format(vignette_prefix)\n clipped_scores = adjusted_scores[:,FINAL_LEFT_CLIP:FINAL_RIGHT_CLIP]\n clipped_sig = sig[:,FINAL_LEFT_CLIP:FINAL_RIGHT_CLIP]\n plot_weights_group(clipped_scores, out_file, sig_array=clipped_sig)\n \n # pull ATAC actual and predicted, save out and plot\n keep_indices = [0,1,2,3,4,5,6,9,10,12]\n with h5py.File(file_name, \"r\") as hf:\n actual = hf[\"ATAC_SIGNALS.NORM\"][example_idx][keep_indices]\n 
predicted = hf[\"logits.norm\"][example_idx][keep_indices]\n actual_v_predicted = pd.DataFrame({\n \"timepoint\": [\"d0.0\", \"d0.5\", \"d1.0\", \"d1.5\", \"d2.0\", \"d2.5\", \"d3.0\", \"d4.5\", \"d5.0\", \"d6.0\"],\n \"ATAC\": actual,\n \"predicted\": predicted}).set_index(\"timepoint\")\n plot_data_file = \"importances.atac.actual_v_predicted.txt\"\n actual_v_predicted.to_csv(plot_data_file, header=True, index=True, sep=\"\\t\")\n\n script_dir = \"/users/dskim89/git/ggr-project/figs/fig_2.modelling\"\n plot_file = \"{}.atac.actual_v_predicted.pdf\".format(vignette_prefix)\n plot_cmd = \"{}/plot.atac.actual_v_pred.R {} {}\".format(\n script_dir, plot_data_file, plot_file)\n print plot_cmd\n os.system(plot_cmd)\n \n return", "def compareAB(model1_name, model2_name, X_test_B, X_test_S, analysis_dir=\"Analysis/\"):\n #Load best weights\n model = tf.keras.models.load_model(\"Models/\"+model1_name)\n bkg_preds1 = model.predict(X_test_B).flatten()\n sig_preds1 = model.predict(X_test_S).flatten()\n\n model = tf.keras.models.load_model(\"Models/\"+model2_name)\n bkg_preds2 = model.predict(X_test_B).flatten()\n sig_preds2 = model.predict(X_test_S).flatten()\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds1 > thresh)/len(bkg_preds1)\n sig_eff_temp = np.sum(sig_preds1 > thresh)/len(sig_preds1)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model1_name + ' Background rejection @0.5 Signal efficiency = {:.2e}'.format(bkg_eff_50), xy=(0.05, 0.95), xycoords='axes fraction')\n print(sig_eff_50)\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds2 > thresh)/len(bkg_preds2)\n sig_eff_temp = np.sum(sig_preds2 > thresh)/len(sig_preds2)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model2_name + ' Background rejection @0.5 Signal efficiency = {:.3e}'.format(bkg_eff_50), xy=(0.05, 0.88), xycoords='axes fraction')\n print(sig_eff_50)\n\n plt.legend([model1_name, model2_name])\n plt.xlabel(\"Signal efficiency\")\n plt.ylabel(\"Background rejection\")\n plt.gcf().set_size_inches(8.3, 5.85)\n plt.savefig(analysis_dir+\"ROC\" + model1_name + \"VS\" + model2_name + \".pdf\", format=\"pdf\")\n plt.show()", "def __init__(self, size, stride, ratios=None, scales=None, *args, **kwargs):\n super(Anchors, self).__init__()\n # strides and sizes align with FPN feature outputs (p2-pn)\n self.size = size\n self.stride = stride\n # ratios and scales applied to all feature levels from FPN output\n if not ratios:\n ratios = [1] #used in RetinaFace since faces are typically square-like\n #ratios = [0.5, 1, 2]\n self.ratios = ratios\n \n if not scales:\n scales = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\n self.scales = scales\n self.n_anchors = len(ratios) * len(scales)\n self.anchor_utils = AnchorUtils(ratios=self.ratios, scales=self.scales)", "def __call__(self, loc, scores, anchors, img_size):\n anchors = bbox.loc2bbox(anchor)", "def fit_anchor_model(df, fit_genes, model, deg, x_col='lfc_target', y_col='lfc'):\n if fit_genes is not None:\n train_df = 
df.loc[df.target_gene.isin(fit_genes), :].copy()\n else:\n train_df = df\n train_x = train_df[x_col].copy()\n train_y = train_df[y_col].copy()\n test_x = df[x_col].copy()\n test_y = df[y_col].copy()\n if model == 'linear':\n predictions, model_info = model_linear(train_x, train_y, test_x)\n elif model == 'fixed slope':\n predictions, model_info = model_fixed_slope(train_x, train_y, test_x)\n elif model == 'spline':\n predictions, model_info = model_spline(train_x, train_y, test_x, deg)\n elif model == 'quadratic':\n predictions, model_info = model_quadratic(train_x, train_y, test_x)\n else:\n raise ValueError('Model ' + model + ' not implemented')\n out_df = df.copy()\n out_df['prediction'] = predictions\n out_df['residual'] = test_y - predictions\n out_df['residual_z'] = (out_df['residual'] - out_df['residual'].mean())/out_df['residual'].std()\n return out_df, model_info", "def to_BayesianModel(model, verbose=3):\n if isinstance(model, dict):\n adjmat = model.get('adjmat', None)\n else:\n adjmat = model\n if adjmat is None: raise Exception('[bnlearn] >Error: input for \"to_BayesianModel\" should be adjmat or a dict containing a key \"adjmat\".')\n\n if verbose>=3: print('[bnlearn] >Conversion of adjmat to BayesianModel.')\n\n # Convert to vector\n vec = adjmat2vec(adjmat)[['source', 'target']].values.tolist()\n # Make BayesianModel\n bayesianmodel = BayesianModel(vec)\n # Return\n return bayesianmodel", "def pseudo_gt_generate_per_image(self, box_cls, box_delta, anchors, image_size):\n boxes_all = []\n scores_all = []\n class_idxs_all = []\n\n # Iterate over every feature level\n for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta,\n anchors):\n # (HxWxAxK,)\n box_cls_i = box_cls_i.flatten().sigmoid_()\n\n # Keep top k top scoring indices only.\n num_topk = min(self.topk_candidates, box_reg_i.size(0))\n # torch.sort is actually faster than .topk (at least on GPUs)\n predicted_prob, topk_idxs = box_cls_i.sort(descending=True)\n predicted_prob = predicted_prob[:num_topk]\n topk_idxs = topk_idxs[:num_topk]\n\n # filter out the proposals with low confidence score\n keep_idxs = predicted_prob > self.pseudo_score_thres\n predicted_prob = predicted_prob[keep_idxs]\n topk_idxs = topk_idxs[keep_idxs]\n\n anchor_idxs = topk_idxs // self.num_classes\n classes_idxs = topk_idxs % self.num_classes\n\n box_reg_i = box_reg_i[anchor_idxs]\n anchors_i = anchors_i[anchor_idxs]\n # predict boxes\n predicted_boxes = self.box2box_transform.apply_deltas(\n box_reg_i, anchors_i.tensor)\n\n boxes_all.append(predicted_boxes)\n scores_all.append(predicted_prob)\n class_idxs_all.append(classes_idxs)\n\n boxes_all, scores_all, class_idxs_all = [\n cat(x) for x in [boxes_all, scores_all, class_idxs_all]\n ]\n keep = batched_nms(boxes_all, scores_all, class_idxs_all,\n self.nms_threshold)\n keep = keep[:self.max_detections_per_image]\n\n result = Instances(image_size)\n result.pred_boxes = Boxes(boxes_all[keep])\n result.scores = scores_all[keep]\n result.pred_classes = class_idxs_all[keep]\n return result", "def define_model(model):\n global log_data_likelihood, log_priors, num_params, file_labels, labels, prior_xs, prior_pdfs\n num_prior_pts = 1001\n pic50_lower = -4.\n pic50_upper = 14.\n hill_lower = 0.\n hill_upper = 6.\n if model == 1:\n num_params = 2\n log_data_likelihood = log_data_likelihood_model_1_capped\n log_priors = log_priors_model_1\n labels = [r\"$pIC50$\", r\"$\\sigma$\"]\n file_labels = ['pIC50','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # 
np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, num_prior_pts),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0], loc=mu, scale=s),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower),[0,0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n st.gamma.pdf(prior_xs[1], sigma_shape, loc=sigma_loc, scale=sigma_scale)]\n elif model == 2:\n num_params = 3\n log_data_likelihood = log_data_likelihood_model_2_capped\n log_priors = log_priors_model_2\n labels = [r\"$pIC50$\", r\"$Hill$\", r\"$\\sigma$\"]\n file_labels = ['pIC50','Hill','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # np.linspace(hill_lower, hill_upper, num_prior_pts),\n # np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, num_prior_pts),\n np.concatenate(([hill_uniform_lower-2,hill_uniform_lower],\n np.linspace(hill_uniform_lower, hill_uniform_upper, num_prior_pts),\n [hill_uniform_upper,hill_uniform_upper+2])),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0],loc=mu,scale=s),\n # st.fisk.pdf(prior_xs[1],c=beta,scale=alpha),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts) / (1. * hill_uniform_upper - hill_uniform_lower),[0,0])),\n # np.concatenate(([0, 0], np.ones(num_prior_pts) / (1. * sigma_uniform_upper - sigma_uniform_lower), [0, 0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n np.concatenate(([0,0],np.ones(num_prior_pts) / (1. 
* hill_uniform_upper - hill_uniform_lower),[0,0])),\n st.gamma.pdf(prior_xs[2], sigma_shape, loc=sigma_loc, scale=sigma_scale)]", "def analize(slugs, parameters_for_align, alpha_variability, alpha2_variability, beta_variability):\n i = alpha_variability[0]\n bestI = 0\n bestResult = 0\n while i < alpha_variability[1]:\n print(\"ALPHA=\"+str(i))\n align.ALPHA = i\n align.align_particular(parameters_for_align)\n current=main(slugs, True, False)\n if current>bestResult:\n bestResult = current\n bestI = i\n i += alpha_variability[2]\n align.ALPHA = bestI\n i = alpha2_variability[0]\n bestI2 = 0\n bestResult2 = 0\n while i < alpha2_variability[1]:\n print(\"ALPHA2=\"+str(i))\n align.ALPHA2 = i\n align.align_particular(parameters_for_align)\n current=main(slugs, False, False)\n if current>bestResult2:\n bestResult2 = current\n bestI2 = i\n i += alpha2_variability[2]\n align.ALPHA2 = bestI2\n i = beta_variability[0]\n bestI3 = 0\n bestResult3 = bestResult2\n while i < beta_variability[1]:\n print(\"BETHA=\" + str(i))\n align.BETHA = i\n align.align_particular(parameters_for_align)\n current = main(slugs, False, False)\n if current > bestResult3:\n bestResult3 = current\n bestI3 = i\n i += beta_variability[2]\n print(\"Best ALPHA=\"+str(bestI))\n print(\"Best ALPHA2=\" + str(bestI2))\n print(\"Best BETHA=\" + str(bestI3))\n print(\"Best result=\" + str(bestResult3))", "def __init__(self, n_x, n_z, qz_hid, px_hid, filters, seq_length=50, nonlinearity=rectify,\r\n px_nonlinearity=None, x_dist='linear', batchnorm=False, seed=1234):\r\n super(CVAE, self).__init__(n_x, qz_hid + px_hid, n_z, nonlinearity)\r\n self.x_dist = x_dist\r\n self.n_x = n_x\r\n self.seq_length = seq_length\r\n self.n_z = n_z\r\n self.batchnorm = batchnorm\r\n self._srng = RandomStreams(seed)\r\n\r\n # Pool layer cache\r\n pool_layers = []\r\n\r\n # Decide Glorot initializaiton of weights.\r\n init_w = 1e-3\r\n hid_w = \"\"\r\n if nonlinearity == rectify or nonlinearity == softplus:\r\n hid_w = \"relu\"\r\n\r\n # Define symbolic variables for theano functions.\r\n self.sym_x = T.tensor3('x') # inputs\r\n self.sym_z = T.matrix('z')\r\n self.sym_samples = T.iscalar('samples') # MC samples\r\n\r\n # Assist methods for collecting the layers\r\n def dense_layer(layer_in, n, dist_w=init.GlorotNormal, dist_b=init.Normal):\r\n dense = DenseLayer(layer_in, n, dist_w(hid_w), dist_b(init_w), None)\r\n if batchnorm:\r\n dense = bn(dense)\r\n return NonlinearityLayer(dense, self.transf)\r\n\r\n def stochastic_layer(layer_in, n, samples, nonlin=None):\r\n mu = DenseLayer(layer_in, n, init.Normal(init_w), init.Normal(init_w), nonlin)\r\n logvar = DenseLayer(layer_in, n, init.Normal(init_w), init.Normal(init_w), nonlin)\r\n return SampleLayer(mu, logvar, eq_samples=samples, iw_samples=1), mu, logvar\r\n\r\n def conv_layer(layer_in, filter, stride=(1, 1), pool=1, name='conv'):\r\n l_conv = Conv2DLayer(layer_in, num_filters=filter, filter_size=(3, 1), stride=stride, pad='full', name=name)\r\n if pool > 1:\r\n l_conv = MaxPool2DLayer(l_conv, pool_size=(pool, 1))\r\n pool_layers.append(l_conv)\r\n return l_conv\r\n\r\n # Reshape input\r\n l_x_in = InputLayer((None, seq_length, n_x), name='Input')\r\n l_x_in_reshp = ReshapeLayer(l_x_in, (-1, 1, seq_length, n_x))\r\n print(\"l_x_in_reshp\", l_x_in_reshp.output_shape)\r\n\r\n # CNN encoder implementation\r\n l_conv_enc = l_x_in_reshp\r\n for filter, stride, pool in filters:\r\n l_conv_enc = conv_layer(l_conv_enc, filter, stride, pool)\r\n print(\"l_conv_enc\", l_conv_enc.output_shape)\r\n\r\n # Pool 
along last 2 axes\r\n l_global_pool_enc = GlobalPoolLayer(l_conv_enc)\r\n l_enc = dense_layer(l_global_pool_enc, n_z)\r\n print(\"l_enc\", l_enc.output_shape)\r\n\r\n # Recognition q(z|x)\r\n l_qz = l_enc\r\n for hid in qz_hid:\r\n l_qz = dense_layer(l_qz, hid)\r\n l_qz, l_qz_mu, l_qz_logvar = stochastic_layer(l_qz, n_z, self.sym_samples)\r\n print(\"l_qz\", l_qz.output_shape)\r\n\r\n # Inverse pooling\r\n l_global_depool = InverseLayer(l_qz, l_global_pool_enc)\r\n print(\"l_global_depool\", l_global_depool.output_shape)\r\n\r\n # Reverse pool layer order\r\n pool_layers = pool_layers[::-1]\r\n\r\n # Decode\r\n l_deconv = l_global_depool\r\n for idx, filter in enumerate(filters[::-1]):\r\n filter, stride, pool = filter\r\n if pool > 1:\r\n l_deconv = InverseLayer(l_deconv, pool_layers[idx])\r\n l_deconv = Conv2DLayer(l_deconv, num_filters=filter, filter_size=(3, 1), stride=(stride, 1), W=init.GlorotNormal('relu'))\r\n print(\"l_deconv\", l_deconv.output_shape)\r\n\r\n # The last l_conv layer should give us the input shape\r\n l_dec = Conv2DLayer(l_deconv, num_filters=1, filter_size=(3, 1), pad='same', nonlinearity=None)\r\n print(\"l_dec\", l_dec.output_shape)\r\n\r\n # Flatten first two dimensions\r\n l_dec = ReshapeLayer(l_dec, (-1, n_x))\r\n\r\n l_px = l_dec\r\n if x_dist == 'bernoulli':\r\n l_px = DenseLayer(l_px, n_x, init.GlorotNormal(), init.Normal(init_w), sigmoid)\r\n elif x_dist == 'multinomial':\r\n l_px = DenseLayer(l_px, n_x, init.GlorotNormal(), init.Normal(init_w), softmax)\r\n elif x_dist == 'gaussian':\r\n l_px, l_px_mu, l_px_logvar = stochastic_layer(l_px, n_x, self.sym_samples, px_nonlinearity)\r\n elif x_dist == 'linear':\r\n l_px = DenseLayer(l_px, n_x, nonlinearity=None)\r\n\r\n # Reshape all the model layers to have the same size\r\n self.l_x_in = l_x_in\r\n\r\n self.l_qz = ReshapeLayer(l_qz, (-1, self.sym_samples, 1, n_z))\r\n self.l_qz_mu = DimshuffleLayer(l_qz_mu, (0, 'x', 'x', 1))\r\n self.l_qz_logvar = DimshuffleLayer(l_qz_logvar, (0, 'x', 'x', 1))\r\n\r\n self.l_px = DimshuffleLayer(ReshapeLayer(l_px, (-1, seq_length, self.sym_samples, 1, n_x)), (0, 2, 3, 1, 4))\r\n self.l_px_mu = DimshuffleLayer(ReshapeLayer(l_px_mu, (-1, seq_length, self.sym_samples, 1, n_x)), (0, 2, 3, 1, 4)) \\\r\n if x_dist == \"gaussian\" else None\r\n self.l_px_logvar = DimshuffleLayer(ReshapeLayer(l_px_logvar, (-1, seq_length, self.sym_samples, 1, n_x)), (0, 2, 3, 1, 4)) \\\r\n if x_dist == \"gaussian\" else None\r\n\r\n # Predefined functions\r\n inputs = {self.l_x_in: self.sym_x}\r\n outputs = get_output(l_qz, inputs, deterministic=True)\r\n self.f_qz = theano.function([self.sym_x, self.sym_samples], outputs)\r\n\r\n inputs = {l_qz: self.sym_z}\r\n outputs = get_output(self.l_px, inputs, deterministic=True).mean(axis=(1, 2))\r\n self.f_px = theano.function([self.sym_z, self.sym_samples], outputs)\r\n\r\n outputs = get_output(self.l_px_mu, inputs, deterministic=True).mean(axis=(1, 2))\r\n self.f_mu = theano.function([self.sym_z, self.sym_samples], outputs)\r\n\r\n outputs = get_output(self.l_px_logvar, inputs, deterministic=True).mean(axis=(1, 2))\r\n self.f_var = theano.function([self.sym_z, self.sym_samples], outputs)\r\n\r\n # Define model parameters\r\n self.model_params = get_all_params([self.l_px])\r\n self.trainable_model_params = get_all_params([self.l_px], trainable=True)", "def __init__(\r\n self,\r\n centre=0.0, # <- PyAutoFit recognises these constructor arguments\r\n intensity=0.1, # <- are the Gaussian`s model parameters.\r\n sigma=0.01,\r\n ):\r\n\r\n self.centre = 
centre\r\n self.intensity = intensity\r\n self.sigma = sigma", "def make_anchors(self, img_shape, dtype=np.float32):\n return anchor_utils.ssd_anchors_all_layers(img_shape,\n self.params.feat_shapes,\n self.params.anchor_sizes,\n self.params.anchor_ratios,\n self.params.anchor_steps,\n self.params.anchor_offset,\n dtype)", "def to_bayesianmodel(model, verbose=3):\n if isinstance(model, dict):\n adjmat = model.get('adjmat', None)\n else:\n adjmat = model\n if adjmat is None: raise Exception('[bnlearn] >Error: input for \"to_bayesianmodel\" should be adjmat or a dict containing a key \"adjmat\".')\n\n if verbose>=3: print('[bnlearn] >Conversion of adjmat to BayesianModel.')\n\n # Convert to vector\n vec = adjmat2vec(adjmat)[['source', 'target']].values.tolist()\n # Make BayesianModel\n bayesianmodel = BayesianModel(vec)\n # Return\n return bayesianmodel", "def inference_single_image(self, box_cls, box_delta, anchors, image_size):\n boxes_all = []\n scores_all = []\n class_idxs_all = []\n\n # Iterate over every feature level\n for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta,\n anchors):\n # (HxWxAxK,)\n box_cls_i = box_cls_i.flatten().sigmoid_()\n\n # Keep top k top scoring indices only.\n num_topk = min(self.topk_candidates, box_reg_i.size(0))\n # torch.sort is actually faster than .topk (at least on GPUs)\n predicted_prob, topk_idxs = box_cls_i.sort(descending=True)\n predicted_prob = predicted_prob[:num_topk]\n topk_idxs = topk_idxs[:num_topk]\n\n # filter out the proposals with low confidence score\n keep_idxs = predicted_prob > self.score_threshold\n predicted_prob = predicted_prob[keep_idxs]\n topk_idxs = topk_idxs[keep_idxs]\n\n anchor_idxs = topk_idxs // self.num_classes\n classes_idxs = topk_idxs % self.num_classes\n\n box_reg_i = box_reg_i[anchor_idxs]\n anchors_i = anchors_i[anchor_idxs]\n # predict boxes\n predicted_boxes = self.box2box_transform.apply_deltas(\n box_reg_i, anchors_i.tensor)\n\n boxes_all.append(predicted_boxes)\n scores_all.append(predicted_prob)\n class_idxs_all.append(classes_idxs)\n\n boxes_all, scores_all, class_idxs_all = [\n cat(x) for x in [boxes_all, scores_all, class_idxs_all]\n ]\n keep = batched_nms(boxes_all, scores_all, class_idxs_all,\n self.nms_threshold)\n keep = keep[:self.max_detections_per_image]\n\n result = Instances(image_size)\n result.pred_boxes = Boxes(boxes_all[keep])\n result.scores = scores_all[keep]\n result.pred_classes = class_idxs_all[keep]\n return result", "def compute_saliency(model, guided_model, img_path, layer_name=conv_name, cls=-1, visualize=False, save=True):\n #--------- slide image get --------------\n ori_img = Image.open(img_path)\n\n #define slide range\n slide_xl = 0\n slide_xr = 100\n slide_yu = 0\n slide_yd = 100\n name_cnt_int = 1\n\n for m in range(9):\n for i in range(9):\n slide_img = ori_img.crop((slide_xl,slide_yu,slide_xr,slide_yd))\n name_cnt_str = str(name_cnt_int)\n roop_str = str(m)\n slide_name = './slide_img/slide_img_' + roop_str + '_' + name_cnt_str + '.jpg'\n slide_img.save(slide_name)\n preprocessed_input = load_image(slide_name)\n\n pred = model.predict(preprocessed_input)[0]\n #print(pred)\n top_n = 3\n top_indices = pred.argsort()[-top_n:][::-1]\n result = [(classes[i], pred[i]) for i in top_indices]\n #print(\"number: \",name_cnt_str)\n print(\"number:\",roop_str,name_cnt_str)\n print(\"xrange: \",slide_xl,slide_xr)\n print(\"yrange: \",slide_yu,slide_yd)\n for x in result:\n print(x)\n\n if cls == -1:\n cls = np.argmax(pred)\n \n print(\"argmax:\",cls)\n if cls == 1:\n 
print(\"\\n\")\n print(\"-----Careful-----\")\n print(\"-----Doubt spotted-----\")\n print(\"\\n\")\n\n if cls == 2:\n print(\"\\n\")\n print(\"-----Warning!!!-----\")\n print(\"-----Bad spotted!!!!!-----\")\n print(\"\\n\")\n\n gradcam = grad_cam(model, preprocessed_input, cls, layer_name)\n gb = guided_backprop(guided_model, preprocessed_input, layer_name)\n guided_gradcam = gb * gradcam[..., np.newaxis]\n cls = -1\n\n if save:\n cam_name = './cam_image/' + roop_str + '_' + name_cnt_str + '.jpg'\n jetcam = cv2.applyColorMap(np.uint8(255 * gradcam), cv2.COLORMAP_JET)\n jetcam = (np.float32(jetcam) + load_image(slide_name, preprocess=False)) / 2\n cv2.imwrite(cam_name, np.uint8(jetcam))\n #cv2.imwrite('guided_backprop.jpg', deprocess_image(gb[0]))\n #cv2.imwrite('guided_gradcam.jpg', deprocess_image(guided_gradcam[0]))\n \n name_cnt_int = int(name_cnt_str)\n name_cnt_int += 1\n #x軸スライド幅\n slide_xl += 50\n slide_xr += 50\n \n \n if visualize:\n \n plt.figure(figsize=(15, 10))\n plt.subplot(131)\n plt.title('GradCAM')\n plt.axis('off')\n plt.imshow(load_image(img_path, preprocess=False))\n plt.imshow(gradcam, cmap='jet', alpha=0.5)\n\n plt.subplot(132)\n plt.title('Guided Backprop')\n plt.axis('off')\n plt.imshow(np.flip(deprocess_image(gb[0]), -1))\n \n plt.subplot(133)\n plt.title('Guided GradCAM')\n plt.axis('off')\n plt.imshow(np.flip(deprocess_image(guided_gradcam[0]), -1))\n plt.show()\n\n #右端までスライド完了、y軸方向へスライド\n name_cnt_int = 0\n slide_xl = 0\n slide_xr = 100\n slide_yu = slide_yu + 50\n slide_yd = slide_yd + 50\n \n \n\n return gradcam, gb, guided_gradcam", "def create_target_np(\r\n all_anchors,\r\n gt_boxes,\r\n similarity_fn,\r\n box_encoding_fn,\r\n prune_anchor_fn=None,\r\n gt_classes=None,\r\n matched_threshold=0.6,\r\n unmatched_threshold=0.45,\r\n bbox_inside_weight=None,\r\n positive_fraction=None,\r\n rpn_batch_size=300,\r\n norm_by_num_examples=False,\r\n box_code_size=7,\r\n):\r\n total_anchors = all_anchors.shape[0]\r\n if prune_anchor_fn is not None:\r\n inds_inside = prune_anchor_fn(all_anchors)\r\n anchors = all_anchors[inds_inside, :]\r\n if not isinstance(matched_threshold, float):\r\n matched_threshold = matched_threshold[inds_inside]\r\n if not isinstance(unmatched_threshold, float):\r\n unmatched_threshold = unmatched_threshold[inds_inside]\r\n else:\r\n anchors = all_anchors\r\n inds_inside = None\r\n num_inside = len(inds_inside) if inds_inside is not None else total_anchors\r\n logger.debug(\"total_anchors: {}\".format(total_anchors))\r\n logger.debug(\"inds_inside: {}\".format(num_inside))\r\n logger.debug(\"anchors.shape: {}\".format(anchors.shape))\r\n if gt_classes is None:\r\n gt_classes = np.ones([gt_boxes.shape[0]], dtype=np.int32)\r\n # Compute anchor labels:\r\n # label=1 is positive, 0 is negative, -1 is don't care (ignore)\r\n labels = np.empty((num_inside,), dtype=np.int32)\r\n gt_ids = np.empty((num_inside,), dtype=np.int32)\r\n labels.fill(-1)\r\n gt_ids.fill(-1)\r\n if len(gt_boxes) > 0 and anchors.shape[0] > 0:\r\n # Compute overlaps between the anchors and the gt boxes overlaps\r\n anchor_by_gt_overlap = similarity_fn(anchors, gt_boxes)\r\n # Map from anchor to gt box that has highest overlap\r\n anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)\r\n # For each anchor, amount of overlap with most overlapping gt box\r\n anchor_to_gt_max = anchor_by_gt_overlap[\r\n np.arange(num_inside), anchor_to_gt_argmax\r\n ] #\r\n # Map from gt box to an anchor that has highest overlap\r\n gt_to_anchor_argmax = 
anchor_by_gt_overlap.argmax(axis=0)\r\n # For each gt box, amount of overlap with most overlapping anchor\r\n gt_to_anchor_max = anchor_by_gt_overlap[\r\n gt_to_anchor_argmax, np.arange(anchor_by_gt_overlap.shape[1])\r\n ]\r\n # must remove gt which doesn't match any anchor.\r\n empty_gt_mask = gt_to_anchor_max == 0\r\n gt_to_anchor_max[empty_gt_mask] = -1\r\n # Find all anchors that share the max overlap amount\r\n # (this includes many ties)\r\n anchors_with_max_overlap = np.where(anchor_by_gt_overlap == gt_to_anchor_max)[0]\r\n # Fg label: for each gt use anchors with highest overlap\r\n # (including ties)\r\n gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]\r\n labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]\r\n gt_ids[anchors_with_max_overlap] = gt_inds_force\r\n # Fg label: above threshold IOU\r\n pos_inds = anchor_to_gt_max >= matched_threshold\r\n gt_inds = anchor_to_gt_argmax[pos_inds]\r\n labels[pos_inds] = gt_classes[gt_inds]\r\n gt_ids[pos_inds] = gt_inds\r\n bg_inds = np.where(anchor_to_gt_max < unmatched_threshold)[0]\r\n else:\r\n bg_inds = np.arange(num_inside)\r\n fg_inds = np.where(labels > 0)[0]\r\n fg_max_overlap = None\r\n if len(gt_boxes) > 0 and anchors.shape[0] > 0:\r\n fg_max_overlap = anchor_to_gt_max[fg_inds]\r\n gt_pos_ids = gt_ids[fg_inds]\r\n # subsample positive labels if we have too many\r\n if positive_fraction is not None:\r\n num_fg = int(positive_fraction * rpn_batch_size)\r\n if len(fg_inds) > num_fg:\r\n disable_inds = npr.choice(\r\n fg_inds, size=(len(fg_inds) - num_fg), replace=False\r\n )\r\n labels[disable_inds] = -1\r\n fg_inds = np.where(labels > 0)[0]\r\n\r\n # subsample negative labels if we have too many\r\n # (samples with replacement, but since the set of bg inds is large most\r\n # samples will not have repeats)\r\n num_bg = rpn_batch_size - np.sum(labels > 0)\r\n if len(bg_inds) > num_bg:\r\n enable_inds = bg_inds[npr.randint(len(bg_inds), size=num_bg)]\r\n labels[enable_inds] = 0\r\n bg_inds = np.where(labels == 0)[0]\r\n else:\r\n if len(gt_boxes) == 0 or anchors.shape[0] == 0:\r\n labels[:] = 0\r\n else:\r\n labels[bg_inds] = 0\r\n # re-enable anchors_with_max_overlap\r\n labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]\r\n bbox_targets = np.zeros((num_inside, box_code_size), dtype=all_anchors.dtype)\r\n if len(gt_boxes) > 0 and anchors.shape[0] > 0:\r\n bbox_targets[fg_inds, :] = box_encoding_fn(\r\n gt_boxes[anchor_to_gt_argmax[fg_inds], :], anchors[fg_inds, :]\r\n )\r\n # Bbox regression loss has the form:\r\n # loss(x) = weight_outside * L(weight_inside * x)\r\n # Inside weights allow us to set zero loss on an element-wise basis\r\n # Bbox regression is only trained on positive examples so we set their\r\n # weights to 1.0 (or otherwise if config is different) and 0 otherwise\r\n # NOTE: we don't need bbox_inside_weights, remove it.\r\n\r\n # The bbox regression loss only averages by the number of images in the\r\n # mini-batch, whereas we need to average by the total number of example\r\n # anchors selected\r\n # Outside weights are used to scale each element-wise loss so the final\r\n # average over the mini-batch is correct\r\n bbox_outside_weights = np.zeros((num_inside,), dtype=all_anchors.dtype)\r\n # uniform weighting of examples (given non-uniform sampling)\r\n if norm_by_num_examples:\r\n num_examples = np.sum(labels >= 0) # neg + pos\r\n num_examples = np.maximum(1.0, num_examples)\r\n bbox_outside_weights[labels > 0] = 1.0 / num_examples\r\n else:\r\n bbox_outside_weights[labels > 0] 
= 1.0\r\n\r\n # Map up to original set of anchors\r\n if inds_inside is not None:\r\n labels = unmap(labels, total_anchors, inds_inside, fill=-1)\r\n bbox_targets = unmap(bbox_targets, total_anchors, inds_inside, fill=0)\r\n # bbox_inside_weights = unmap(\r\n # bbox_inside_weights, total_anchors, inds_inside, fill=0)\r\n bbox_outside_weights = unmap(\r\n bbox_outside_weights, total_anchors, inds_inside, fill=0\r\n )\r\n ret = {\r\n \"labels\": labels,\r\n \"bbox_targets\": bbox_targets,\r\n \"bbox_outside_weights\": bbox_outside_weights,\r\n \"assigned_anchors_overlap\": fg_max_overlap,\r\n \"positive_gt_id\": gt_pos_ids,\r\n }\r\n if inds_inside is not None:\r\n ret[\"assigned_anchors_inds\"] = inds_inside[fg_inds]\r\n else:\r\n ret[\"assigned_anchors_inds\"] = fg_inds\r\n return ret", "def generate_feature_level_base_anchors(self, size):\n \n anchors = np.zeros((self.n_anchors, 4)) \n #scale base size at different scales\n anchors[:, 2:] = size * np.tile(self.scales, (2, len(self.ratios))).T\n # get different combinations of aspect ratios\n areas = anchors[:, 2] * anchors[:, 3]\n anchors[:, 2] = np.sqrt(areas / np.repeat(self.ratios, len(self.scales)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(self.ratios, len(self.scales))\n \n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n \n #self.base_anchors = tf.cast(anchors, dtype=tf.float32)\n return anchors", "def front_column_model_p_gain():", "def __init__(self, sigma=0.4, alpha=1.0, reset_always=True):\n super().__init__()\n self.sigma = sigma\n self.alpha = alpha\n self.reset_always = reset_always\n self.guided_attn_masks = None\n self.masks = None", "def main(model=\"pytorch\", img_path=\"./test-asan test/biopsy/malignantmelanoma/\"):\n # load model selection\n assert model in [\"pytorch\", \"caffe\"]\n if model == \"pytorch\":\n difev_vars.model = PytorchModel()\n if model == \"caffe\":\n difev_vars.model = CaffeModel()\n\n results = {}\n fig_path = './difev1/'\n if os.path.exists(fig_path + os.sep + 'results_' + model + '.pkl'):\n results = pickle.load(open(fig_path + os.sep + 'results_' + model + '.pkl', 'rb'))\n\n attacks = [ColorAttack(), PixelAttack(), RotationTranslationAttack()]\n for attack in attacks:\n for filename in os.listdir(img_path):\n if filename + os.sep + attack.name + os.sep + model in results:\n print('skipping')\n continue\n\n outcome = run_attack(attack, img_path=img_path, filename=filename, target='nevus', fig_path='./difev1/',\n save=False)\n results[filename + os.sep + attack.name + os.sep + model] = {'outcome': outcome,\n 'orig': difev_vars.prob_orig[\n difev_vars.pred_orig],\n \"adversarial\":\n difev_vars.prob_adv[difev_vars.pred_adv]}\n if os.path.exists(fig_path + os.sep + 'results_' + model + '.pkl'):\n copyfile(fig_path + os.sep + 'results_' + model + '.pkl',\n fig_path + os.sep + 'results_' + model + '.old')\n pickle.dump(results, open(fig_path + os.sep + 'results_' + model + '.pkl', 'wb'))", "def load_model_meta(meta_path):\r\n xlsx = pd.ExcelFile(meta_path)\r\n \r\n #The zooming factor is saved in the UsedData sheet\r\n meta = pd.read_excel(xlsx,sheet_name=\"UsedData\")\r\n zoom_factor = meta[\"zoom_factor\"].iloc[0]#should images be zoomed before forwarding through neural net?\r\n\r\n meta = pd.read_excel(xlsx,sheet_name=\"Parameters\")\r\n\r\n try:\r\n model_type = meta[\"Chosen Model\"].iloc[0]#input dimensions of the model\r\n except:\r\n model_type = 
\"Unknown\"\r\n \r\n try:\r\n target_imsize = meta[\"Input image crop\"].iloc[0]#input dimensions of the model\r\n except:\r\n target_imsize = meta[\"Input image size\"].iloc[0]#input dimensions of the model\r\n\r\n normalization_method = meta[\"Normalization\"].iloc[0]#normalization method\r\n if normalization_method == \"StdScaling using mean and std of all training data\": \r\n mean_trainingdata = meta[\"Mean of training data used for scaling\"]\r\n std_trainingdata = meta[\"Std of training data used for scaling\"]\r\n else:\r\n mean_trainingdata = None\r\n std_trainingdata = None\r\n \r\n #Following parameters may not exist in meta files of older AID versions. Hence try/except\r\n\r\n #Color mode: grayscale or RGB?\r\n try:\r\n target_channels = meta[\"Color Mode\"].iloc[0]\r\n except:\r\n target_channels = \"grayscale\"\r\n if target_channels.lower() ==\"grayscale\":\r\n target_channels = 1\r\n elif target_channels.lower() ==\"rgb\":\r\n target_channels = 3\r\n\r\n #The order for the zooming operation\r\n try:\r\n zoom_interpol_method = meta[\"Zoom order\"].iloc[0]\r\n except:\r\n zoom_interpol_method = \"cv2.INTER_NEAREST\"\r\n #Translate zoom_interpol_method to OpenCV argument\r\n if \"cv2.\" not in str(zoom_interpol_method):\r\n zoom_interpol_method = zoom_arguments_scipy2cv(zoom_factor,zoom_interpol_method)\r\n\r\n #Padding mode\r\n try:\r\n padding_mode = meta[\"paddingMode\"].iloc[0]\r\n except:\r\n padding_mode = \"constant\"#cv2.BORDER_CONSTANT\r\n #translate padding_mode to OpenCV argument\r\n if \"cv2.\" not in padding_mode:\r\n padding_mode = pad_arguments_np2cv(padding_mode)\r\n\r\n #Write information in one DataFrame\r\n img_processing_settings = pd.DataFrame()\r\n img_processing_settings[\"model_type\"]=model_type,\r\n img_processing_settings[\"target_imsize\"]=target_imsize,\r\n img_processing_settings[\"target_channels\"]=target_channels,\r\n img_processing_settings[\"normalization_method\"]=normalization_method,\r\n img_processing_settings[\"mean_trainingdata\"]=mean_trainingdata,\r\n img_processing_settings[\"std_trainingdata\"]=std_trainingdata,\r\n img_processing_settings[\"zoom_factor\"]=zoom_factor,\r\n img_processing_settings[\"zoom_interpol_method\"]=zoom_interpol_method,\r\n img_processing_settings[\"padding_mode\"]=padding_mode,\r\n \r\n return img_processing_settings", "def __init__(self, model, batch_size=1, confidence=CONFIDENCE,\n targeted=TARGETED, learning_rate=LEARNING_RATE,\n binary_search_steps=BINARY_SEARCH_STEPS, max_iterations=MAX_ITERATIONS, print_every=100, early_stop_iters=0,\n abort_early=ABORT_EARLY,\n initial_c=INITIAL_C,\n use_log=True, use_tanh=True, use_resize=False, adam_beta1=0.9, adam_beta2=0.999, reset_adam_after_found=False,\n solver=\"adam\", save_ckpts=\"\", load_checkpoint=\"\", start_iter=0,\n init_size=32, use_importance=False, device=\"cuda\"):\n\n if solver != \"fake_zero\":\n torch.set_grad_enabled(False)\n\n self.image_size, self.num_channels, num_labels = model.image_size, model.num_channels, model.num_labels\n self.model = model\n self.TARGETED = targeted\n self.LEARNING_RATE = learning_rate\n self.MAX_ITERATIONS = max_iterations\n self.print_every = print_every\n self.early_stop_iters = early_stop_iters if early_stop_iters != 0 else max_iterations // 10\n print(\"early stop:\", self.early_stop_iters)\n self.BINARY_SEARCH_STEPS = binary_search_steps\n self.ABORT_EARLY = abort_early\n self.CONFIDENCE = confidence\n self.initial_c = initial_c\n self.start_iter = start_iter\n self.batch_size = batch_size\n self.num_channels = 
self.num_channels\n self.resize_init_size = init_size\n self.use_importance = use_importance\n if use_resize:\n self.small_x = self.resize_init_size\n self.small_y = self.resize_init_size\n else:\n self.small_x = self.image_size\n self.small_y = self.image_size\n\n self.use_tanh = use_tanh\n self.use_resize = use_resize\n self.save_ckpts = save_ckpts\n if save_ckpts:\n os.system(\"mkdir -p {}\".format(save_ckpts))\n\n self.repeat = binary_search_steps >= 10\n self.device = device\n\n # each batch has a different modifier value (see below) to evaluate\n # small_shape = (None,self.small_x,self.small_y,num_channels)\n\n single_shape = (self.num_channels, self.image_size, self.image_size)\n small_single_shape = (self.num_channels, self.small_x, self.small_y)\n\n # the variable we're going to optimize over\n # support multiple batches\n # support any size image, will be resized to model native size\n\n # the real variable, initialized to 0\n self.load_checkpoint = load_checkpoint\n if load_checkpoint:\n # if checkpoint is incorrect reshape will fail\n print(\"Using checkpoint\", load_checkpoint)\n self.real_modifier = torch.load(load_checkpoint, map_location=torch.device(device)).reshape(\n (1,) + small_single_shape)\n else:\n self.real_modifier = torch.zeros(\n (1,) + small_single_shape, dtype=torch.float32, device=self.device)\n\n if solver == \"fake_zero\":\n self.real_modifier.requires_grad = True\n # self.real_modifier = np.random.randn(image_size * image_size * num_channels).astype(torch.float32).reshape((1,) + single_shape)\n # self.real_modifier /= np.linalg.norm(self.real_modifier)\n # these are variables to be more efficient in sending data to tf\n # we only work on 1 image at once; the batch is for evaluation loss at different modifiers\n self.true_img = torch.zeros(single_shape, device=self.device)\n self.true_label_1hot = torch.zeros(num_labels, device=self.device)\n self.c = 0.0\n\n # prepare the list of all valid variables\n var_size = self.small_x * self.small_y * self.num_channels\n self.use_var_len = var_size\n self.var_list = torch.tensor(\n range(0, self.use_var_len), dtype=torch.int64, device=self.device)\n self.used_var_list = torch.zeros(\n var_size, dtype=torch.int64, device=self.device)\n self.sample_prob = torch.ones(\n var_size, dtype=torch.float32, device=self.device) / var_size\n\n # upper and lower bounds for the modifier\n self.modifier_up = torch.zeros(\n var_size, dtype=torch.float32, device=self.device)\n self.modifier_down = torch.zeros(\n var_size, dtype=torch.float32, device=self.device)\n\n # random permutation for coordinate update\n self.perm = torch.randperm(var_size)\n self.perm_index = 0\n\n # ADAM status\n self.mt = torch.zeros(\n var_size, dtype=torch.float32, device=self.device)\n self.vt = torch.zeros(\n var_size, dtype=torch.float32, device=self.device)\n # self.beta1 = 0.8\n # self.beta2 = 0.99\n self.beta1 = adam_beta1\n self.beta2 = adam_beta2\n self.reset_adam_after_found = reset_adam_after_found\n self.adam_epoch = torch.ones(\n var_size, dtype=torch.int64, device=self.device)\n self.stage = 0\n # variables used during optimization process\n self.grad = torch.zeros(\n batch_size, dtype=torch.float32, device=self.device)\n self.hess = torch.zeros(\n batch_size, dtype=torch.float32, device=self.device)\n # compile numba function\n # self.coordinate_ADAM_numba = jit(coordinate_ADAM, nopython = True)\n # self.coordinate_ADAM_numba.recompile()\n # print(self.coordinate_ADAM_numba.inspect_llvm())\n # np.set_printoptions(threshold=np.nan)\n # set 
solver\n solver = solver.lower()\n self.solver_name = solver\n if solver == \"adam\":\n self.solver = coordinate_ADAM\n if solver == \"adam_torch\":\n self.solver = coordinate_ADAM_torch\n elif solver == \"newton\":\n self.solver = coordinate_Newton\n elif solver == \"adam_newton\":\n self.solver = coordinate_Newton_ADAM\n elif solver != \"fake_zero\":\n print(\"unknown solver\", solver)\n self.solver = coordinate_ADAM\n print(\"Using\", solver, \"solver\")", "def __init__(\r\n self,\r\n centre=30.0, # <- **PyAutoFit** recognises these constructor arguments\r\n normalization=1.0, # <- are the Gaussian`s model parameters.\r\n sigma=5.0,\r\n ):\r\n self.centre = centre\r\n self.normalization = normalization\r\n self.sigma = sigma", "def __init__(self, args, number_of_labels, number_of_features,adj):\n super(SpGAT, self).__init__()\n self.args=args\n \n self.number_of_labels = number_of_labels\n self.number_of_features = number_of_features\n self.device = args.device\n self.adj= sparse_mx_to_torch_sparse_tensor(adj).to(self.device).to_dense()\n self.attentions = [SpGraphAttentionLayer(number_of_features, \n args.hidden, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=True) for _ in range(args.nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(args.hidden * args.nheads, \n args.Q, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=False)", "def bias_prior(self):", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def define_gan(g_model, d_model):\r\n # make weights in the discriminator (some shared with the q model) as not trainable\r\n d_model.trainable = False\r\n # connect g outputs to d inputs\r\n d_output = d_model(g_model.output)\r\n # define composite model\r\n model = Model(g_model.input, d_output)\r\n # compile model\r\n opt = Adam(lr=0.0001, beta_1=0.5)\r\n model.compile(loss=['binary_crossentropy'], optimizer=opt)\r\n return model", "def __call__(self, targets, logits, seq_length):\n\t\t# Clean spectograms of sources\n\t\tspectrogram_targets = targets['multi_targets']\n\n\t\t# Spectogram of the original mixture, used to mask for scoring\n\t\tmix_to_mask = targets['mix_to_mask']\n\n\t\t# Length of sequences\n\t\tseq_length = seq_length['bin_emb']\n\t\t# Logits (=output network)\n\t\temb_vec = logits['bin_emb']\n\t\tanchors = logits['anchors']\n\n\t\tif 'speaker_logits' in logits:\n\t\t\t# Assuming dimensions are B x T x S\n\t\t\tspeaker_logits = logits['speaker_logits']\n\t\t\tav_speaker_logits_time_flag = self.lossconf['av_speaker_logits_time_flag'] == 'True'\n\t\telse:\n\t\t\tspeaker_logits = None\n\n\t\tif 'anchors_scale' in logits:\n\t\t\t# Assuming dimensions are B x T x S\n\t\t\tanchors_scale = logits['anchors_scale']\n\t\t\tanchors_scale = anchors_scale[0, 0]\n\t\telse:\n\t\t\tanchors_scale = None\n\n\t\ttime_anchors_flag = self.lossconf['time_anchors_flag'] == 'True'\n\t\tav_anchors_time_flag = (self.lossconf['av_anchors_time_flag'] == 'True') and time_anchors_flag\n\t\tactivation = self.lossconf['activation']\n\t\tnormalize_embs = self.lossconf['normalize_embs'] == 'True'\n\t\tnormalize_anchors = self.lossconf['normalize_anchors'] == 'True'\n\t\tif 'do_square' in self.lossconf:\n\t\t\tdo_square = self.lossconf['do_square'] == 'True'\n\t\telse:\n\t\t\tdo_square = True\n\n\t\twith 
tf.name_scope('anchor_deepattractornet_loss'):\n\n\t\t\tfeat_dim = spectrogram_targets.get_shape()[2]\n\t\t\temb_dim = anchors.get_shape()[-1]\n\t\t\ttime_dim = tf.shape(anchors)[1]\n\t\t\tnrS = spectrogram_targets.get_shape()[3]\n\n\t\t\tV = tf.reshape(emb_vec, [self.batch_size, -1, feat_dim, emb_dim], name='V') # dim: (B x T x F x D)\n\t\t\tif normalize_embs:\n\t\t\t\tV = V / (tf.norm(V, axis=-1, keepdims=True) + 1e-12)\n\t\t\ttime_dim = tf.shape(V)[1]\n\n\t\t\tif not time_anchors_flag:\n\t\t\t\tanchors = tf.tile(tf.expand_dims(tf.expand_dims(anchors, 0), 0), [self.batch_size, time_dim, 1, 1]) # dim: (B x T x S x D)\n\n\t\t\tif normalize_anchors:\n\t\t\t\tanchors = anchors / (tf.norm(anchors, axis=-1, keepdims=True) + 1e-12)\n\n\t\t\tif speaker_logits is not None:\n\t\t\t\tspeaker_logits = tf.expand_dims(speaker_logits, -1)\n\t\t\t\tif av_speaker_logits_time_flag:\n\t\t\t\t\tspeaker_logits = tf.reduce_mean(speaker_logits, 1, keepdims=True)\n\t\t\t\tanchors *= speaker_logits\n\n\t\t\tif anchors_scale is not None:\n\t\t\t\tanchors *= anchors_scale\n\n\t\t\tif av_anchors_time_flag:\n\t\t\t\tanchors = tf.reduce_mean(anchors, axis=1, keepdims=True)\n\t\t\t\tanchors = tf.tile(anchors, [1, time_dim, 1, 1])\n\n\t\t\tprod_1 = tf.matmul(V, anchors, transpose_a=False, transpose_b=True, name='AVT')\n\n\t\t\tif activation == 'softmax':\n\t\t\t\tmasks = tf.nn.softmax(prod_1, axis=-1, name='M') # dim: (B x T x F x nrS)\n\t\t\telif activation in ['None', 'none', None]:\n\t\t\t\tmasks = prod_1\n\t\t\telif activation == 'sigmoid':\n\t\t\t\tmasks = tf.nn.sigmoid(prod_1, name='M')\n\t\t\telse:\n\t\t\t\tmasks = tf.nn.sigmoid(prod_1, name='M')\n\n\t\t\tX = tf.expand_dims(mix_to_mask, -1, name='X') # dim: (B x T x F x 1)\n\t\t\treconstructions = tf.multiply(masks, X) # dim: (B x T x F x nrS)\n\t\t\treconstructions = tf.transpose(reconstructions, perm=[3, 0, 1, 2]) # dim: (nrS x B x T x F)\n\n\t\t\tS = tf.transpose(spectrogram_targets, [3, 0, 1, 2]) # nrS x B x T x F\n\n\t\t\tif 'vad_targets' in targets:\n\t\t\t\toverlap_weight = float(self.lossconf['overlap_weight'])\n\t\t\t\tvad_sum = tf.reduce_sum(targets['vad_targets'], -1)\n\t\t\t\tbin_weights = tf.where(\n\t\t\t\t\tvad_sum > 1,\n\t\t\t\t\ttf.ones([self.batch_size, time_dim]) * overlap_weight,\n\t\t\t\t\ttf.ones([self.batch_size, time_dim]))\n\t\t\t\tbin_weights = tf.expand_dims(bin_weights, -1) # broadcast the frame weights to all bins\n\t\t\t\tnorm = tf.reduce_sum(bin_weights) * tf.to_float(feat_dim)\n\t\t\telse:\n\t\t\t\tbin_weights = None\n\t\t\t\tnorm = tf.to_float(tf.reduce_sum(seq_length) * feat_dim)\n\n\t\t\tloss = ops.base_pit_loss(reconstructions, S, bin_weights=bin_weights, overspeakererized=False, do_square=do_square)\n\n\t\treturn loss, norm", "def add_guide_alignment(self):\n test_sam = self.get_signalalign_events(sam=True)\n events = self.get_resegment_basecall()\n cigar_labels = create_labels_from_guide_alignment(events=events, sam_string=test_sam,\n kmer_index=self.kmer_index)\n for i, block in enumerate(cigar_labels):\n # print(block)\n self.aligned_signal.add_label(block, name=\"guide_alignment{}\".format(i), label_type='guide')\n return True", "def get_pseudo_label(self, anchors, box_cls_batch, box_delta_batch, gt_instances, scale_weight, enforce_back=False, back_thre=0.3, fore_thre=0.7, IOU_thre=0.5):\n with torch.no_grad():\n anchors = type(anchors[0]).cat(anchors).tensor\n device = anchors.device\n N = len(gt_instances)\n weight_flatten = [torch.ones((permute_to_N_HWA_K(x, self.num_classes)).shape[0:2]).to(device)*scale_weight[i] for 
i, x in enumerate(box_cls_batch)]\n weight_flatten = torch.cat(weight_flatten, dim=1).view(-1)\n pred_logits_collect = []\n pred_boxes_collect = []\n pseudo_target_logits_collect = []\n pseudo_target_boxes_collect = []\n weight_logits_collect = []\n weight_boxes_collect = []\n # For each image in the batch:\n for i in range(N):\n # Aggregate box_cls and box_delta for each scale.\n box_cls = [box_cls[i:i+1] for box_cls in box_cls_batch]\n box_delta = [box_delta[i:i+1] for box_delta in box_delta_batch]\n pred_class_logits, pred_anchor_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat(box_cls, box_delta, self.num_classes) # Shapes: (N x R, K) and (N x R, 4), respectively.\n pred_box = self.box2box_transform.apply_deltas(pred_anchor_deltas, anchors)\n gt_boxes = gt_instances[i].gt_boxes\n gt_labels = gt_instances[i].gt_classes\n # Initial the pseudo_targets\n with torch.no_grad():\n pseudo_target_logits = pred_class_logits.clone().to(pred_class_logits.device)\n pseudo_target_logits = pseudo_target_logits.sigmoid()\n pseudo_target_boxes = pred_box.clone().to(pred_box.device)\n # Step 1: For each object, assgin groud-truth to the predicted boxes of the highest IoU, to prevent the case that there are missing detections.\n # For convenience, we use Matcher provided by D2 to achieve this step. We use a high fore_thre to get the highest IoU match.\n matcher = Matcher([back_thre, fore_thre], [-1, 0, 1], allow_low_quality_matches=True)\n with torch.no_grad():\n match_quality_matrix = pairwise_iou(gt_boxes, Boxes(anchors))\n matched_idxs, anchor_labels = matcher(match_quality_matrix)\n del match_quality_matrix\n # Assign groud-truth predictions to the selected anchors.\n selected_anchor = anchor_labels == 1\n pseudo_target_logits[selected_anchor] = 0\n pseudo_target_logits[selected_anchor, gt_labels[matched_idxs[selected_anchor]]] = 1\n pseudo_target_boxes[selected_anchor] = gt_boxes.tensor[matched_idxs[selected_anchor]]\n # If enforce_back is enabled, background-anchors are also included in the pseudo-labels.\n # background-anchors are anchors which are far away from any objects.\n # By enableing enforce_back, we enforce the background-anchors to detect nothing.\n if enforce_back:\n background_idxs = anchor_labels == -1\n pseudo_target_logits[background_idxs] = 0\n pseudo_back_logits = pseudo_target_logits[background_idxs].clone().to(pseudo_target_logits.device)\n pred_class_back_logits = pred_class_logits[background_idxs]\n weight_back_logits = weight_flatten[background_idxs]\n\n # Step 2: Conduct NMS process, filter out eliminated dectections.\n # Only apply constraints on detections kept after NMS.\n logits_sigmoid = pseudo_target_logits.flatten()\n num_topk = min(self.topk_candidates, pseudo_target_boxes.size(0))\n predicted_prob, topk_idxs = logits_sigmoid.sort(descending=True)\n predicted_prob = predicted_prob[: num_topk]\n topk_idxs = topk_idxs[:num_topk]\n keep_idxs = predicted_prob > self.score_threshold\n predicted_prob = predicted_prob[keep_idxs]\n topk_idxs = topk_idxs[keep_idxs]\n anchor_idxs = topk_idxs // self.num_classes\n\n pseudo_target_logits = pseudo_target_logits[anchor_idxs]\n pseudo_target_boxes = pseudo_target_boxes[anchor_idxs]\n pred_box = pred_box[anchor_idxs]\n pred_class_logits = pred_class_logits[anchor_idxs]\n weight_logits = weight_flatten[anchor_idxs]\n weight_boxes = weight_flatten[anchor_idxs]\n gt_labels = gt_instances[i].gt_classes\n\n # Step 3: Match the rest detections with the ground-truth objects and assign pseudo-targets based on the matching.\n # If IoU 
> IOU_thre, assign ground-truth cls and box as the target.\n # Else, assign background as targets.\n matcher = Matcher([IOU_thre], [0, 1], allow_low_quality_matches=False)\n\n match_quality_matrix = pairwise_iou(gt_boxes, Boxes(pseudo_target_boxes))\n matched_idxs, anchor_labels = matcher(match_quality_matrix)\n del match_quality_matrix\n\n target = torch.zeros(((anchor_labels == 1).sum(), 80), dtype=pred_box.dtype, device=pred_box.device)\n target[torch.arange((anchor_labels == 1).sum()), gt_labels[matched_idxs[anchor_labels == 1]]] = 1.0\n pseudo_target_logits[anchor_labels == 1] = target\n pseudo_target_boxes[anchor_labels == 1] = gt_boxes.tensor[matched_idxs[anchor_labels == 1]]\n pseudo_target_boxes = pseudo_target_boxes[anchor_labels == 1]\n pred_box = pred_box[anchor_labels == 1]\n pseudo_target_logits[anchor_labels == 0] = 0\n weight_boxes = weight_boxes[anchor_labels == 1]\n if enforce_back:\n pseudo_target_logits = torch.cat([pseudo_back_logits, pseudo_target_logits], dim=0)\n pred_class_logits = torch.cat([pred_class_back_logits, pred_class_logits], dim=0)\n weight_logits = torch.cat([weight_back_logits, weight_logits], dim=0)\n pseudo_target_boxes_collect.append(pseudo_target_boxes)\n pseudo_target_logits_collect.append(pseudo_target_logits)\n pred_boxes_collect.append(pred_box)\n pred_logits_collect.append(pred_class_logits)\n weight_logits_collect.append(weight_logits)\n weight_boxes_collect.append(weight_boxes)\n return torch.cat(pred_logits_collect), torch.cat(pred_boxes_collect), torch.cat(pseudo_target_logits_collect), torch.cat(pseudo_target_boxes_collect), torch.cat(weight_logits_collect), torch.cat(weight_boxes_collect)", "def __init__(self, args, normalization_mean, normalization_std,\n style_img, content_img, content_weight=1, style_weight=1000000):\n super(ArtNet, self).__init__()\n\n self.args = args\n\n self.style_img = style_img\n self.content_img = content_img\n\n self.content_layers = ['conv_4']\n self.style_layers = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']\n\n # mean and std used for normalization\n self.normalization_mean = normalization_mean\n self.normalization_std = normalization_std\n\n # weights of content image and style image\n self.content_weight = args.content_weight if args else content_weight\n self.style_weight = args.style_weight if args else style_weight\n\n # initialize vgg19 pre-trained model\n self.model = vgg19(pretrained=True).features.to(device).eval()", "def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, 
column=2, padx=5, pady=5)", "def __init__(\r\n self,\r\n centre: float = 0.0, # <- PyAutoFit recognises these constructor arguments are the model\r\n normalization: float = 0.1, # <- parameters of the Gaussian.\r\n rate: float = 0.01,\r\n ):\r\n self.centre = centre\r\n self.normalization = normalization\r\n self.rate = rate", "def __init__(self, anchors: list = None,\n masks: list = None, image_res: int = constants.YOLO_DEFAULT_IMAGE_RES,\n tiny: bool = False, iou_threshold: float = 0.5,\n score_threshold: float = 0.5):\n self.iou_threshold = iou_threshold\n self.score_threshold = score_threshold\n self.image_res = image_res\n self.tiny = tiny\n if tiny:\n self.model_name = constants.Tiny_YOLOv3\n self.anchors = constants.TINY_YOLO_DEFAULT_ANCHORS if anchors is None else anchors\n self.masks = constants.TINY_YOLO_DEFAULT_ANCHOR_MASKS if masks is None else masks\n else:\n self.model_name = constants.YOLOv3\n self.anchors = constants.YOLO_DEFAULT_ANCHORS if anchors is None else anchors\n self.masks = constants.YOLO_DEFAULT_ANCHOR_MASKS if masks is None else masks\n\n self.n_classes, self.train_model, self.inference_model = None, None, None\n self.checkpoints_path, self.logs_path, self.figs_path = None, None, None\n if np.max(self.anchors) > 1:\n raise Exception(\"The anchors must be normalized\")", "def __init__(self, ebunch=None):\n super(BayesianModel, self).__init__()\n if ebunch:\n self.add_edges_from(ebunch)\n self.cpds = []\n self.cardinalities = self.get_cardinality()\n self.probs = dict()", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + 
unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def anchor_preds(self, preds, train_anchor_inds, image_offset):\n assert train_anchor_inds.size(1) == 4\n tai = train_anchor_inds.data.clone()\n tai[:, 0] -= image_offset\n train_regions = gather_nd(preds, tai)\n class_preds = train_regions[:, :2]\n box_preds = train_regions[:, 2:]\n return class_preds, box_preds", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def guide(c, feats):\n vector = vec.transform(extract_features(c, feats))\n predicted_tags = sorted([i for i in zip(clf.best_estimator_.predict_proba(vector), clf.best_estimator_.classes_)], reverse=True)\n\n # get a list of tags allowed for this configuration\n possible_tags = get_morph_label(c).split('$')\n index_of_best_tag = 0 # nothing found case\n\n for tag in predicted_tags:\n if tag in possible_tags:\n index_of_best_tag = possible_tags.index(tag)\n break\n\n # return the analysis corresponding to the best tagset\n analyses = c.sentence[c.buffer[0]][1]\n return analyses[index_of_best_tag]", "def __init__(self, cfg):\n super(MHBCoAtt, self).__init__()\n self.cfg = cfg\n # word embedding: q_vocab_size, 1024\n self.word_embedding = nn.Embedding(cfg.q_vocab_size, cfg.emb_dim)\n # LSTM\n if cfg.glove:\n self.lstm = nn.LSTM(input_size=cfg.emb_dim*2,\n hidden_size=cfg.hidden_dim,\n num_layers=cfg.num_layers,\n batch_first=True)\n else:\n self.lstm = nn.LSTM(input_size=cfg.emb_dim,\n hidden_size=cfg.hidden_dim,\n num_layers=cfg.num_layers,\n batch_first=True)\n\n self.dropout_l = nn.Dropout(p = 0.3)\n # question attention\n self.ques_att_conv1 = nn.Conv2d(cfg.hidden_dim, 512, [1,1])\n self.ques_att_conv2 = nn.Conv2d(512, 2, [1,1])\n\n # question attentive feature fuse with image feature, according to paper: k * o = 5000, k = 5\n self.ques_proj1 = nn.Linear(2*cfg.hidden_dim, 5000)\n self.img_conv1d = nn.Conv2d(cfg.img_feature_channel, 5000, [1, 1])\n self.dropout_m = nn.Dropout(p = 0.1)\n\n # co-attention conv layers\n self.co_att_conv1 = nn.Conv2d(1000, 512, [1,1])\n self.co_att_conv2 = nn.Conv2d(512, 2, [1,1])\n\n # co_attentive feature fuse with question attentive feature\n self.ques_proj2 = 
nn.Linear(2*cfg.hidden_dim, 5000)\n self.ques_proj3 = nn.Linear(2*cfg.hidden_dim, 5000)\n self.img_proj2 = nn.Linear(2*cfg.img_feature_channel, 5000)\n self.img_proj3 = nn.Linear(2*cfg.img_feature_channel, 5000)\n\n # prediction fully connected layer\n self.linear_pred = nn.Linear(2000, cfg.a_vocab_size)", "def anchor_target_layer(rpn_cls_score, gt_boxes, im_info, _feat_stride, all_anchors, num_anchors):\n A = num_anchors\n total_anchors = all_anchors.shape[0]\n K = total_anchors / num_anchors\n\n # allow boxes to sit over the edge by a small amount\n _allowed_border = 0\n\n # map of shape (..., H, W)\n height, width = rpn_cls_score.shape[1:3]\n\n # only keep anchors inside the image\n inds_inside = np.where(\n (all_anchors[:, 0] >= -_allowed_border) &\n (all_anchors[:, 1] >= -_allowed_border) &\n (all_anchors[:, 2] < im_info[1] + _allowed_border) & # width\n (all_anchors[:, 3] < im_info[0] + _allowed_border) # height\n )[0]\n\n # keep only inside anchors\n anchors = all_anchors[inds_inside, :]\n\n # label: 1 is positive, 0 is negative, -1 is dont care\n labels = np.empty((len(inds_inside),), dtype=np.float32)\n labels.fill(-1)\n\n # overlaps between the anchors and the gt boxes\n # overlaps (ex, gt)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(anchors, dtype=np.float),\n np.ascontiguousarray(gt_boxes, dtype=np.float))\n argmax_overlaps = overlaps.argmax(axis=1)\n max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]\n gt_argmax_overlaps = overlaps.argmax(axis=0)\n gt_max_overlaps = overlaps[gt_argmax_overlaps,\n np.arange(overlaps.shape[1])]\n gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]\n\n if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels first so that positive labels can clobber them\n # first set the negatives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # fg label: for each gt, anchor with highest overlap\n labels[gt_argmax_overlaps] = 1\n\n # fg label: above threshold IOU\n labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1\n\n if cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels last so that negative labels can clobber positives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # subsample positive labels if we have too many\n num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)\n fg_inds = np.where(labels == 1)[0]\n if len(fg_inds) > num_fg:\n disable_inds = npr.choice(\n fg_inds, size=(len(fg_inds) - num_fg), replace=False)\n labels[disable_inds] = -1\n\n # subsample negative labels if we have too many\n num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)\n bg_inds = np.where(labels == 0)[0]\n if len(bg_inds) > num_bg:\n disable_inds = npr.choice(\n bg_inds, size=(len(bg_inds) - num_bg), replace=False)\n labels[disable_inds] = -1\n\n bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)\n bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])\n\n bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n # only the positive ones have regression targets\n bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)\n\n bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:\n # uniform weighting of examples (given non-uniform sampling)\n num_examples = np.sum(labels >= 0)\n positive_weights = np.ones((1, 4)) * 1.0 / num_examples\n negative_weights = np.ones((1, 4)) * 1.0 / num_examples\n else:\n assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &\n 
(cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))\n positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /\n np.sum(labels == 1))\n negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /\n np.sum(labels == 0))\n bbox_outside_weights[labels == 1, :] = positive_weights\n bbox_outside_weights[labels == 0, :] = negative_weights\n\n # map up to original set of anchors\n labels = _unmap(labels, total_anchors, inds_inside, fill=-1)\n bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)\n bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)\n bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)\n\n # labels\n labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)\n labels = labels.reshape((1, 1, A * height, width))\n rpn_labels = labels\n\n # bbox_targets\n bbox_targets = bbox_targets \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_targets = bbox_targets\n # bbox_inside_weights\n bbox_inside_weights = bbox_inside_weights \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_inside_weights = bbox_inside_weights\n\n # bbox_outside_weights\n bbox_outside_weights = bbox_outside_weights \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_outside_weights = bbox_outside_weights\n return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights", "def forward(self, x):\n # Introspect number of classes from anchors and input shape.\n num_anchors = len(self.anchors)\n batch_size, num_predictions, h, w = x.shape\n num_classes = int(num_predictions / num_anchors) - 5\n x = x.reshape((batch_size, num_anchors, num_classes + 5, h, w))\n\n # Indices 0-3 correspond to xywh energies, index 4 corresponds to\n # objectness energy, and 5: correspond to class energies.\n xywh_energy = x[:, :, 0:4, :, :]\n obj_energy = x[:, :, 4:5, :, :]\n class_energy = x[:, :, 5:, :, :]\n\n bbox_xywh = xywh_energy.clone().detach()\n\n # Cell offsets C_x and C_y from original paper.\n cx = torch.linspace(0, w - 1, w, device=self.device).repeat(h, 1)\n cy = torch.linspace(\n 0, h - 1, h, device=self.device\n ).repeat(w, 1).t().contiguous()\n\n # Get bbox center x and y coordinates.\n bbox_xywh[:, :, 0, :, :].sigmoid_().add_(cx).div_(w)\n bbox_xywh[:, :, 1, :, :].sigmoid_().add_(cy).div_(h)\n\n # Anchor priors (P_w and P_h in original paper).\n anchors = self.anchors\n\n anchor_w = torch.tensor(\n anchors, device=self.device\n )[:, 0].reshape(1, num_anchors, 1, 1)\n\n anchor_h = torch.tensor(\n anchors, device=self.device\n )[:, 1].reshape(1, num_anchors, 1, 1)\n\n # Get bbox width and height.\n bbox_xywh[:, :, 2, :, :].exp_().mul_(anchor_w)\n bbox_xywh[:, :, 3, :, :].exp_().mul_(anchor_h)\n\n # Get objectness and class scores.\n obj_score = obj_energy.clone().detach().sigmoid()\n class_score = F.softmax(class_energy.clone().detach(), dim=2)\n\n class_prob, class_idx = torch.max(class_score, 2, keepdim=True)\n class_prob.mul_(obj_score)\n\n # Flatten resulting tensors along anchor box and grid cell dimensions;\n # this makes it easier to combine predictions across scales from other\n # YOLO layers in Darknet.forward().\n # `bbox_xywh` -> (batch_size x num_predictions x 4) tensor, where last\n # dim corresponds to xywh coordinates of prediction.\n # `class_prob`, `class_idx` -> (batch_size x num_predictions)\n # tensors, where dim 1 corresponds to the probability and index,\n # respectively, of the class with the greatest probability.\n bbox_xywh = bbox_xywh.permute(0, 1, 3, 4, 2).reshape(batch_size, -1, 4)\n class_prob 
= class_prob.reshape(batch_size, -1)\n class_idx = class_idx.reshape(batch_size, -1)\n\n return bbox_xywh, class_prob, class_idx", "def main(DATASET='campbell', N_AGE_MIX=1):\n files = glob(f'resources/SN*_{DATASET}_chain.tsv')\n N_SNE = len(files)\n # end = -11 - len(DATASET)\n # get the numbers after the SN.\n snids = map(lambda x: re.search('(?<=SN)\\d*', x).group(0), files)\n snids = list(map(int, snids))\n\n\n model = GaussianMixture(N_AGE_MIX)\n amplitudes = np.zeros((N_SNE, N_AGE_MIX))\n means = np.zeros((N_SNE, N_AGE_MIX))\n stds = np.zeros((N_SNE, N_AGE_MIX))\n\n print(f'Fitting ages to {N_AGE_MIX} Gaussians')\n pdf = PdfPages(f'resources/age_{DATASET}_{N_AGE_MIX}gaus_representation_preview.pdf')\n\n for i, f in enumerate(files):\n data = np.genfromtxt(f, delimiter='\\t')\n data = data[:, 7]\n\n model.fit(np.expand_dims(data, 1))\n\n amplitudes[i] = model.weights_.reshape(N_AGE_MIX)\n means[i] = model.means_.reshape(N_AGE_MIX)\n stds[i] = np.sqrt(model.covariances_).reshape(N_AGE_MIX)\n\n plt.figure()\n plt.hist(data, bins=np.linspace(-5, 20, 200))\n plt.hist(model.sample(1020000)[0], alpha=0.5, bins=np.linspace(-5, 20, 200))\n plt.title(f)\n \n pdf.savefig()\n plt.close()\n\n if (i+1)%10 == 0:\n print(f'Finished with the {i+1}th age fit')\n\n pdf.close()\n\n # if DATASET != 'both':\n ages = np.column_stack((snids, amplitudes, means, stds))\n # todo update the header to match the number of Gaussians used.\n np.savetxt(f'resources/age_{DATASET}_{N_AGE_MIX}gaus_representation.csv', ages, delimiter=',',\n header='sn id, amp_1, amp_2, amp_3, mean_1, mean_2, mean_2, std_1, std_2, std_3')\n \n print(f'Done with {N_AGE_MIX} Gaussian mixture for {DATASET}.')", "def __init__(self, idim, odim_tgt, odim_src, args, ignore_id=-1):\n torch.nn.Module.__init__(self)\n if args.transformer_attn_dropout_rate is None:\n args.transformer_attn_dropout_rate = args.dropout_rate\n\n # special tokens and model dimensions\n self.pad = 0\n self.sos_tgt = odim_tgt - 1\n self.eos_tgt = odim_tgt - 1\n self.sos_src = odim_src - 1\n self.eos_src = odim_src - 1\n self.odim_tgt = odim_tgt\n self.odim_src = odim_src\n self.idim = idim\n self.adim = args.adim\n self.ignore_id = ignore_id\n\n # submodule\n self.mtlalpha = getattr(args, \"mtlalpha\", 0.0)\n self.asr_weight = getattr(args, \"asr_weight\", 0.0)\n self.mt_weight = getattr(args, \"mt_weight\", 0.0)\n self.num_decoders = getattr(args, \"num_decoders\", 2)\n self.do_st = getattr(args, \"do_st\", True)\n self.do_mt = getattr(args, \"do_mt\", self.mt_weight > 0.0)\n self.do_asr = self.asr_weight > 0 and self.mtlalpha < 1\n\n # cross-attention parameters\n self.cross_weight = getattr(args, \"cross_weight\", 0.0)\n self.cross_self = getattr(args, \"cross_self\", False)\n self.cross_src = getattr(args, \"cross_src\", False)\n self.cross_operator = getattr(args, \"cross_operator\", None)\n self.cross_to_asr = getattr(args, \"cross_to_asr\", False)\n self.cross_to_st = getattr(args, \"cross_to_st\", False)\n self.wait_k_asr = getattr(args, \"wait_k_asr\", 0)\n self.wait_k_st = getattr(args, \"wait_k_st\", 0)\n self.cross_src_from = getattr(args, \"cross_src_from\", \"embedding\")\n self.cross_self_from = getattr(args, \"cross_self_from\", \"embedding\")\n self.cross_shared = getattr(args, \"cross_shared\", False)\n self.cross_weight_learnable = getattr(args, \"cross_weight_learnable\", False)\n\n # one-to-many models parameters\n self.use_joint_dict = getattr(args, \"use_joint_dict\", True)\n self.one_to_many = getattr(args, \"one_to_many\", False)\n self.use_lid = 
getattr(args, \"use_lid\", False)\n if self.use_joint_dict:\n self.langs_dict = getattr(args, \"langs_dict_tgt\", None)\n self.lang_tok = getattr(args, \"lang_tok\", None)\n self.lang_tok_mt = getattr(args, \"lang_tok_mt\", None)\n\n self.subsample = get_subsample(args, \n mode='mt' if self.do_mt else 'st', \n arch='transformer')\n self.reporter = MTReporter() if self.do_mt else Reporter() \n self.normalize_before = getattr(args, \"normalize_before\", True)\n\n # Backward compatability\n if self.cross_operator in [\"sum\", \"concat\"]:\n if self.cross_self and self.cross_src:\n self.cross_operator = \"self_src\" + self.cross_operator\n elif self.cross_self:\n self.cross_operator = \"self_\" + self.cross_operator\n elif self.cross_src:\n self.cross_operator = \"src_\" + self.cross_operator\n if self.cross_operator:\n assert self.cross_operator in ['self_sum', 'self_concat', 'src_sum', \n 'src_concat', 'self_src_sum', 'self_src_concat']\n\n # Check parameters\n if self.one_to_many:\n self.use_lid = True\n if not self.do_st:\n assert (not self.cross_to_asr) and (not self.cross_to_st)\n if self.cross_operator and 'sum' in self.cross_operator and self.cross_weight <= 0:\n assert (not self.cross_to_asr) and (not self.cross_to_st)\n if self.cross_to_asr or self.cross_to_st:\n assert self.do_st and self.do_asr\n assert self.cross_self or self.cross_src\n assert bool(self.cross_operator) == (self.do_asr and (self.cross_to_asr or self.cross_to_st))\n if self.cross_src_from != \"embedding\" or self.cross_self_from != \"embedding\":\n assert self.normalize_before\n if self.wait_k_asr > 0:\n assert self.wait_k_st == 0\n elif self.wait_k_st > 0:\n assert self.wait_k_asr == 0\n else:\n assert self.wait_k_asr == 0\n assert self.wait_k_st == 0\n\n logging.info(\"*** Cross attention parameters ***\")\n if self.cross_to_asr:\n logging.info(\"| Cross to ASR\")\n if self.cross_to_st:\n logging.info(\"| Cross to ST\")\n if self.cross_self:\n logging.info(\"| Cross at Self\")\n if self.cross_src:\n logging.info(\"| Cross at Source\")\n if self.cross_to_asr or self.cross_to_st:\n logging.info(f'| Cross operator: {self.cross_operator}')\n logging.info(f'| Cross sum weight: {self.cross_weight}')\n if self.cross_src:\n logging.info(f'| Cross source from: {self.cross_src_from}')\n if self.cross_self:\n logging.info(f'| Cross self from: {self.cross_self_from}')\n logging.info(f\"Use joint dictionary: {self.use_joint_dict}\")\n \n if (self.cross_src_from != \"embedding\" and self.cross_src) \\\n and (not self.normalize_before):\n logging.warning(f'WARNING: Resort to using \\\n self.cross_src_from == embedding for cross at source attention.')\n if (self.cross_self_from != \"embedding\" and self.cross_self) \\\n and (not self.normalize_before):\n logging.warning(f'WARNING: Resort to using \\\n self.cross_self_from == embedding for cross at self attention.')\n\n # Adapters\n self.use_adapters = getattr(args, \"use_adapters\", False)\n self.use_adapters_in_enc = getattr(args, \"use_adapters_in_enc\", False)\n adapter_names = getattr(args, \"adapters\", None)\n adapter_reduction_factor = getattr(args, \"adapter_reduction_factor\", None)\n adapter_reduction_factor_enc = getattr(args, \"adapter_reduction_factor_enc\", adapter_reduction_factor)\n use_adapters_for_asr = getattr(args, \"use_adapters_for_asr\", True)\n adapter_before_src_attn = getattr(args, \"adapter_before_src_attn\", False)\n adapter_after_mha = getattr(args, \"adapter_after_mha\", False)\n use_shared_adapters = getattr(args, \"use_shared_adapters\", False)\n 
use_shared_adapters_enc = getattr(args, \"use_shared_adapters_enc\", False)\n # if self.use_adapters and not use_adapters_for_asr:\n # assert not self.do_asr or \\\n # (self.do_asr and self.num_decoders != 1) or \\\n # (self.do_asr and not self.do_st) # for backward compatibility\n\n if adapter_names:\n if self.do_asr and not self.do_st:\n adapter_names = [str(args.char_list_src.index(f'<2{l}>')) for l in adapter_names]\n else:\n adapter_names = [str(args.char_list_tgt.index(f'<2{l}>')) for l in adapter_names]\n logging.info(f'| adapters = {adapter_names}')\n\n if self.do_st or self.do_asr:\n logging.info(f'Speech encoder')\n self.encoder = Encoder(\n idim=idim,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.eunits,\n num_blocks=args.elayers,\n input_layer=getattr(args, \"transformer_input_layer\", \"conv2d\"),\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n attention_dropout_rate=args.transformer_attn_dropout_rate,\n adapter_names=adapter_names if self.use_adapters_in_enc else None,\n reduction_factor=adapter_reduction_factor_enc,\n adapter_after_mha=adapter_after_mha,\n shared_adapters=use_shared_adapters_enc,\n )\n if self.do_st:\n logging.info('ST decoder')\n self.decoder = Decoder(\n odim=odim_tgt,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n self_attention_dropout_rate=args.transformer_attn_dropout_rate,\n src_attention_dropout_rate=args.transformer_attn_dropout_rate,\n normalize_before=self.normalize_before,\n cross_operator=self.cross_operator if self.cross_to_st else None,\n cross_shared=self.cross_shared,\n cross_weight_learnable=self.cross_weight_learnable,\n cross_weight=self.cross_weight,\n use_output_layer=True if (self.use_joint_dict or \\\n (self.do_st and not self.do_asr)) else False,\n adapter_names=adapter_names,\n reduction_factor=adapter_reduction_factor,\n adapter_before_src_attn=adapter_before_src_attn,\n adapter_after_mha=adapter_after_mha,\n shared_adapters=use_shared_adapters,\n )\n if self.do_asr:\n logging.info('ASR decoder')\n self.decoder_asr = Decoder(\n odim=odim_src,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n self_attention_dropout_rate=args.transformer_attn_dropout_rate,\n src_attention_dropout_rate=args.transformer_attn_dropout_rate,\n normalize_before=self.normalize_before,\n cross_operator=self.cross_operator if self.cross_to_asr else None,\n cross_shared=self.cross_shared,\n cross_weight_learnable=self.cross_weight_learnable,\n cross_weight=self.cross_weight,\n use_output_layer=True if (self.use_joint_dict or \\\n (self.do_asr and not self.do_st)) else False,\n adapter_names=adapter_names,\n reduction_factor=adapter_reduction_factor,\n adapter_before_src_attn=adapter_before_src_attn,\n adapter_after_mha=adapter_after_mha,\n shared_adapters=use_shared_adapters,\n )\n if self.num_decoders == 1 and self.do_st:\n logging.info('*** Use shared decoders *** ')\n self.decoder_asr = self.decoder\n\n if not self.use_joint_dict and (self.do_st and self.do_asr):\n self.output_layer = torch.nn.Linear(args.adim, odim_tgt)\n self.output_layer_asr = torch.nn.Linear(args.adim, odim_src)\n\n # submodule for MT task\n if self.do_mt:\n logging.info('MT encoder')\n self.encoder_mt = Encoder(\n idim=odim_src,\n 
attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n input_layer='embed',\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n attention_dropout_rate=args.transformer_attn_dropout_rate,\n padding_idx=0\n )\n if not self.do_st:\n logging.info('MT decoder')\n self.decoder_mt = Decoder(\n odim=odim_tgt,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n self_attention_dropout_rate=args.transformer_attn_dropout_rate,\n src_attention_dropout_rate=args.transformer_attn_dropout_rate,\n normalize_before=self.normalize_before,\n use_output_layer=True,\n )\n self.reset_parameters(args) # place after the submodule initialization\n if self.mtlalpha > 0.0:\n self.ctc = CTC(odim_src, args.adim, args.dropout_rate, \n ctc_type=args.ctc_type, reduce=True,\n zero_infinity=True)\n else:\n self.ctc = None\n\n if self.asr_weight > 0 and (args.report_cer or args.report_wer):\n from espnet.nets.e2e_asr_common import ErrorCalculator\n self.error_calculator = ErrorCalculator(args.char_list_src,\n args.sym_space, args.sym_blank,\n args.report_cer, args.report_wer)\n elif self.do_mt and getattr(args, \"report_bleu\", False):\n from espnet.nets.e2e_mt_common import ErrorCalculator\n self.error_calculator = ErrorCalculator(args.char_list_tgt,\n args.sym_space,\n args.report_bleu)\n else:\n self.error_calculator = None\n self.rnnlm = None\n\n # criterion\n if self.do_st:\n self.criterion_st = LabelSmoothingLoss(self.odim_tgt, self.ignore_id, args.lsm_weight,\n args.transformer_length_normalized_loss)\n if self.do_asr:\n self.criterion_asr = LabelSmoothingLoss(self.odim_src, self.ignore_id, args.lsm_weight,\n args.transformer_length_normalized_loss)\n if self.do_mt:\n self.criterion_mt = LabelSmoothingLoss(self.odim_tgt, self.ignore_id, args.lsm_weight,\n args.transformer_length_normalized_loss)\n self.normalize_length = args.transformer_length_normalized_loss # for PPL\n\n # Language embedding layer\n if self.lang_tok == \"encoder-pre-sum\":\n self.language_embeddings = build_embedding(self.langs_dict, self.idim, \n padding_idx=self.pad)\n logging.info(f'language_embeddings: {self.language_embeddings}')\n\n # Backward compatability\n if self.cross_operator:\n if \"sum\" in self.cross_operator:\n self.cross_operator = \"sum\"\n if \"concat\" in self.cross_operator: \n self.cross_operator = \"concat\"", "def eval_pos_affine():\n root_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/\"\n seq_dir = root_dir + \"sequences/\"\n annotations_dir = root_dir + 'annotations/'\n affine_dir = root_dir + \"affine_orig/\"\n all_iou = []\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n seqs = [seq.strip() for seq in seqs_str.split()]\n for seq in seqs:\n image_file = os.listdir(os.path.join(seq_dir, seq))[0]\n image = cv2.imread(os.path.join(seq_dir, seq, image_file))\n orig_h, orig_w = image.shape[:2]\n\n with open(os.path.join(affine_dir, seq+'.pickle'), 'rb') as fin:\n affine_dict = pickle.load(fin)\n\n bbox, frame_id = get_frame_bbox(annotations_dir, seq + '.txt')\n predict_bbox = []\n for i in range(len(bbox)):\n # convert to std resolution\n bbox[i][:, 0] = bbox[i][:, 0]\n bbox[i][:, 1] = bbox[i][:, 1]\n bbox[i][:, 2] = bbox[i][:, 2]\n bbox[i][:, 3] = bbox[i][:, 3]\n\n # for j in range(bbox[i].shape[0]):\n # bbox[i][j] = 
tlwh_to_tlbr(bbox[i][j])\n for idx in range(len(bbox)):\n kalman_filter = KalmanFilter()\n trace_bbox = bbox[idx]\n trace_predict_bbox = []\n mean, covariance = kalman_filter.initiate(tlwh_to_xyah(trace_bbox[0]))\n for i in range(1, trace_bbox.shape[0]):\n # i-1 to i M\n frame_name = \"{:07d}.jpg\".format(int(frame_id[idx][i-1]))\n M = affine_dict[frame_name]\n bbox_infer = tlwh(mean)\n bbox_infer = tlwh_to_tlbr(bbox_infer)\n bbox_expand = np.ones((3, 4))\n bbox_expand[:2, 0] = bbox_infer[:2]\n bbox_expand[:2, 1] = bbox_infer[2:]\n # tr\n bbox_expand[:2, 2] = bbox_infer[2], bbox_infer[1]\n # bl\n bbox_expand[:2, 3] = bbox_infer[0], bbox_infer[3]\n bbox_expand = np.dot(M, bbox_expand)\n for t in range(bbox_expand.shape[1]):\n bbox_expand[:2, t] /= bbox_expand[2, t]\n # bbox_infer[:2] = bbox_expand[:2, 0]\n # bbox_infer[2:] = bbox_expand[:2, 1]\n # get the out bounding bbox\n bbox_infer[0] = min(bbox_expand[0, :])\n bbox_infer[1] = min(bbox_expand[1, :])\n bbox_infer[2] = max(bbox_expand[0, :])\n bbox_infer[3] = max(bbox_expand[1, :])\n bbox_infer = tlbr_to_tlwh(bbox_infer)\n # print(bbox_infer)\n trace_predict_bbox.append(bbox_infer)\n # move = mean[:4] - tlwh_to_xyah(bbox_infer)\n # if np.sum(np.square(move)[:2]) > 32*32:\n # print(move)\n # print(idx, frame_name)\n # print(mean)\n mean[:4] = tlwh_to_xyah(bbox_infer)\n # print(mean)\n mean, covariance = kalman_filter.predict(mean, covariance)\n mean, covariance = kalman_filter.update(mean, covariance, tlwh_to_xyah(trace_bbox[i]))\n\n trace_predict_bbox = np.array(trace_predict_bbox)\n for i in range(trace_predict_bbox.shape[0]):\n trace_predict_bbox[i] = tlwh_to_tlbr(trace_predict_bbox[i])\n for i in range(trace_bbox.shape[0]):\n trace_bbox[i] = tlwh_to_tlbr(trace_bbox[i])\n\n predict_bbox.append(trace_predict_bbox)\n bbox[idx] = bbox[idx][1:]\n frame_id[idx] = frame_id[idx][1:]\n assert bbox[idx].shape[0] == predict_bbox[idx].shape[0]\n iou = []\n for i in range(len(bbox)):\n trace_iou = []\n trace_bbox = bbox[i]\n trace_predict_bbx = predict_bbox[i]\n for j in range(trace_bbox.shape[0]):\n iou_val = bbox_ious(np.ascontiguousarray(trace_bbox[j][np.newaxis, :], dtype=np.float),\n np.ascontiguousarray(trace_predict_bbx[j][np.newaxis, :], dtype=np.float))\n trace_iou.append(iou_val)\n iou.append(np.array(trace_iou))\n iou = [int(np.mean(i) * 100) for i in iou]\n all_iou += iou\n bins = np.zeros(101)\n for i in all_iou:\n bins[i] += 1\n plt.bar(np.arange(101), bins)\n plt.ylabel('num')\n plt.xlabel('iou(*100)')\n plt.show()", "def generate_image_stylegan2ada(model: Any, generation_options) -> dict:\n G = model\n truncation_psi = generation_options.truncation\n seed = generation_options.seed\n\n if not seed:\n seed = random.randint(0, 2 ** 32 - 1) # 2**32-1 is the highest seed value\n\n img, w = seed_to_array_image(G, seed, truncation_psi)\n\n image_blob = save_image_as_bytes(img)\n w_blob = save_vector_as_bytes(w)\n\n return {\"result_image\": (image_blob, w_blob)}", "def _clip_iqa_get_anchor_vectors(\n model_name_or_path: str,\n model: _CLIPModel,\n processor: _CLIPProcessor,\n prompts_list: List[str],\n device: Union[str, torch.device],\n) -> Tensor:\n if model_name_or_path == \"clip_iqa\":\n text_processed = processor(text=prompts_list)\n anchors_text = torch.zeros(\n len(prompts_list), processor.tokenizer.model_max_length, dtype=torch.long, device=device\n )\n for i, tp in enumerate(text_processed[\"input_ids\"]):\n anchors_text[i, : len(tp)] = torch.tensor(tp, dtype=torch.long, device=device)\n\n anchors = 
model.encode_text(anchors_text).float()\n else:\n text_processed = processor(text=prompts_list, return_tensors=\"pt\", padding=True)\n anchors = model.get_text_features(\n text_processed[\"input_ids\"].to(device), text_processed[\"attention_mask\"].to(device)\n )\n return anchors / anchors.norm(p=2, dim=-1, keepdim=True)", "def __init__(self, A, sigma_s_x, sigma_g_x, sigma_s_y, sigma_g_y):\n\n self._mode_x = GaussianSchellModel1D(A**0.5, sigma_s_x, sigma_g_x)\n self._mode_y = GaussianSchellModel1D(A**0.5, sigma_s_y, sigma_g_y)\n\n # For eigenvalue ordering.\n self._sorted_mode_indices = None", "def forward_test(self, img, img_metas, **kwargs):\n labels = {}\n labels['trans_inv'] = kwargs['trans_inv']\n labels['intrinsic_param'] = kwargs['intrinsic_param']\n labels['joint_root'] = kwargs['joint_root']\n labels['depth_factor'] = kwargs['depth_factor']\n labels['target_uvd_29'] = kwargs['target_uvd_29']\n labels['target_xyz_24'] = kwargs['target_xyz_24']\n labels['target_weight_24'] = kwargs['target_weight_24']\n labels['target_weight_29'] = kwargs['target_weight_29']\n labels['target_xyz_17'] = kwargs['target_xyz_17']\n labels['target_weight_17'] = kwargs['target_weight_17']\n labels['target_theta'] = kwargs['target_theta']\n labels['target_beta'] = kwargs['target_beta']\n labels['target_smpl_weight'] = kwargs['target_smpl_weight']\n labels['target_theta_weight'] = kwargs['target_theta_weight']\n labels['target_twist'] = kwargs['target_twist']\n labels['target_twist_weight'] = kwargs['target_twist_weight']\n\n bboxes = kwargs['bbox']\n\n for k, _ in labels.items():\n labels[k] = labels[k].cuda()\n\n trans_inv = labels.pop('trans_inv')\n intrinsic_param = labels.pop('intrinsic_param')\n joint_root = labels.pop('joint_root')\n depth_factor = labels.pop('depth_factor')\n if len(depth_factor.shape) != 2:\n depth_factor = torch.unsqueeze(depth_factor, dim=1)\n\n if self.backbone is not None:\n img = img.cuda().requires_grad_()\n features = self.backbone(img)\n features = features[0]\n else:\n features = img['features']\n\n if self.neck is not None:\n features = self.neck(features)\n\n output = self.head(features, trans_inv, intrinsic_param, joint_root,\n depth_factor, self.smpl)\n\n pred_uvd_jts = output['pred_uvd_jts']\n batch_num = pred_uvd_jts.shape[0]\n pred_xyz_jts_24 = output['pred_xyz_jts_24'].reshape(batch_num, -1,\n 3)[:, :24, :]\n pred_xyz_jts_24_struct = output['pred_xyz_jts_24_struct'].reshape(\n batch_num, 24, 3)\n pred_xyz_jts_17 = output['pred_xyz_jts_17'].reshape(batch_num, 17, 3)\n pred_mesh = output['pred_vertices'].reshape(batch_num, -1, 3)\n\n pred_xyz_jts_24 = pred_xyz_jts_24.cpu().data.numpy()\n pred_xyz_jts_24_struct = pred_xyz_jts_24_struct.cpu().data.numpy()\n pred_xyz_jts_17 = pred_xyz_jts_17.cpu().data.numpy()\n pred_uvd_jts = pred_uvd_jts.cpu().data\n pred_mesh = pred_mesh.cpu().data.numpy()\n pred_pose = output['pred_pose'].cpu().data.numpy()\n pred_beta = output['pred_shape'].cpu().data.numpy()\n\n assert pred_xyz_jts_17.ndim in [2, 3]\n pred_xyz_jts_17 = pred_xyz_jts_17.reshape(pred_xyz_jts_17.shape[0], 17,\n 3)\n pred_uvd_jts = pred_uvd_jts.reshape(pred_uvd_jts.shape[0], -1, 3)\n pred_xyz_jts_24 = pred_xyz_jts_24.reshape(pred_xyz_jts_24.shape[0], 24,\n 3)\n pred_scores = output['maxvals'].cpu().data[:, :29]\n\n hm_shape = [64, 64]\n pose_coords_list = []\n for i in range(pred_xyz_jts_17.shape[0]):\n bbox = bboxes[i].tolist()\n pose_coords, _ = heatmap2coord(\n pred_uvd_jts[i],\n pred_scores[i],\n hm_shape,\n bbox,\n mean_bbox_scale=None)\n 
pose_coords_list.append(pose_coords)\n\n all_preds = {}\n all_preds['vertices'] = pred_mesh\n all_preds['smpl_pose'] = pred_pose\n all_preds['smpl_beta'] = pred_beta\n all_preds['xyz_17'] = pred_xyz_jts_17\n all_preds['uvd_jts'] = pose_coords\n all_preds['xyz_24'] = pred_xyz_jts_24_struct\n image_path = []\n for img_meta in img_metas:\n image_path.append(img_meta['image_path'])\n all_preds['image_path'] = image_path\n all_preds['image_idx'] = kwargs['sample_idx']\n return all_preds", "def update_alpha(model):\n a = model.params.alpha_prior[0]\n b = model.params.alpha_prior[1]\n\n alpha_old = model.params.alpha\n\n log_p_old = model.feat_alloc_dist.log_p(model.params)\n\n alpha_new = scipy.stats.gamma.rvs(a, scale=(1 / b))\n\n model.params.alpha = alpha_new\n\n log_p_new = model.feat_alloc_dist.log_p(model.params)\n\n if do_metropolis_hastings_accept_reject(log_p_new, log_p_old, 0, 0):\n model.params.alpha = alpha_new\n\n else:\n model.params.alpha = alpha_old", "def _generate_anchors(point, sizes, aspect_ratios, layout, beta, include_depth):\n\n distance = point[2]\n base_size = sizes[0]\n scales = sizes[1:] / base_size\n # beta = 8\n scales = (beta/distance)*scales\n\n center = (point[0], point[1])\n anchor = np.array([center[0] - base_size/2.0, center[1] - base_size/2.0,\n center[0] + base_size/2.0, center[1] + base_size/2.0],\n dtype=np.float)\n\n anchors = _ratio_enum(anchor, aspect_ratios)\n anchors = np.vstack(\n [_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]\n )\n\n all_anchors = np.empty((0,4))\n for l in layout:\n new_anchors = _shift_anchors(anchors, l)\n all_anchors = np.vstack((all_anchors, new_anchors))\n\n if int(include_depth)==1:\n # Add the distance as the 5th element to all anchors\n new_shape = (all_anchors.shape[0], all_anchors.shape[1]+1)\n new_anchors = np.ones(new_shape) * distance\n new_anchors[:,:-1] = all_anchors\n all_anchors = new_anchors\n\n return all_anchors", "def __init__(self, anchors: torch.Tensor, num_classes: int):\n super(YoloHead, self).__init__()\n self.anchors :torch.Tensor = anchors\n self.num_classes : int = num_classes", "def yolo_head_base(features, anchors, num_classes, input_shape):\n\n dtype = K.dtype(features)\n num_anchors = len(anchors)\n\n grid, grid_shape = construct_grid(features)\n\n # Reshape anchors and features\n anchors_shape = [1, 1, 1, num_anchors, 2] # batch, height, width, num_anchors, box_params\n anchors_tensor = K.reshape(K.constant(anchors), anchors_shape)\n features_shape = [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]\n features = K.reshape(features, features_shape)\n\n # Adjust predictions to each spatial grid point and anchor size.\n box_xy = (K.sigmoid(features[..., :2]) + grid) / K.cast(grid_shape[::-1], dtype)\n box_wh = K.exp(features[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], dtype)\n\n return grid, features, box_xy, box_wh", "def test_anchor_output():\n anchorDict = {\n \"ReplicationFactor\": 2,\n # Accl factor must divide batch size\n \"AccumulationFactor\": 4,\n \"Pipelining\": True,\n \"ReturnType\": \"ALL\",\n }\n label_array = np.ones([BATCH_SIZE]).astype(np.int32)\n\n micro_batch_size = BATCH_SIZE // (\n anchorDict[\"AccumulationFactor\"] * anchorDict[\"ReplicationFactor\"]\n )\n\n builder = popart.Builder()\n input_shape = [micro_batch_size, CHANNELS, DATA_LEN, DATA_LEN]\n\n data_shape = popart.TensorInfo(\"FLOAT\", input_shape)\n lbl_shape = popart.TensorInfo(\"INT32\", [micro_batch_size])\n w = builder.addInitializedInputTensor(\n 
np.random.random_sample(input_shape).astype(np.float32)\n )\n\n ip = builder.addInputTensor(data_shape)\n lb = builder.addInputTensor(lbl_shape)\n\n a = builder.aiOnnx.matmul([ip, w])\n o = builder.reshape_const(\n builder.aiOnnx, [a], [micro_batch_size, CHANNELS * DATA_LEN * DATA_LEN]\n )\n o = builder.aiOnnx.relu([o])\n o = builder.aiOnnx.softmax([o])\n nll = builder.aiGraphcore.nllloss([o, lb])\n\n GRAD = popart.reservedGradientPrefix() + w\n ACCL = popart.reservedAccumPrefix() + w\n art = popart.AnchorReturnType(\"All\")\n data_flow = popart.DataFlow(\n BATCHES_PER_STEP, {o: art, a: art, ip: art, w: art, GRAD: art, ACCL: art}\n )\n\n opts, deviceContext = return_options(anchorDict)\n with deviceContext as device:\n if device is None:\n pytest.skip(\"Test needs to run on IPU, but none are available\")\n\n session = popart.TrainingSession(\n fnModel=builder.getModelProto(),\n dataFlow=data_flow,\n loss=nll,\n optimizer=popart.ConstSGD(LEARNING_RATE),\n userOptions=opts,\n deviceInfo=device,\n )\n\n session.prepareDevice()\n\n if anchorDict[\"ReplicationFactor\"] > 1:\n input_shape = [anchorDict[\"ReplicationFactor\"]] + input_shape\n label_array = label_array.reshape([anchorDict[\"ReplicationFactor\"], -1])\n if anchorDict[\"AccumulationFactor\"] > 1:\n input_shape = [anchorDict[\"AccumulationFactor\"]] + input_shape\n label_array = label_array.reshape([anchorDict[\"AccumulationFactor\"], -1])\n if BATCHES_PER_STEP > 1:\n input_shape = [BATCHES_PER_STEP] + input_shape\n label_array = np.repeat(label_array[np.newaxis], BATCHES_PER_STEP, 0)\n\n anchors = session.initAnchorArrays()\n in_array = np.random.random_sample(input_shape).astype(np.float32)\n\n stepio = popart.PyStepIO({ip: in_array, lb: label_array}, anchors)\n session.weightsFromHost()\n\n session.run(stepio)\n\n # Returned anchors will be of shape\n # [bps, grad_accl_factor, repl_factor, micro_batch_size, channels, data_len, data_len]\n for batch in range(anchors[w].shape[0]):\n for replica in range(anchors[w].shape[1]):\n # Weights should not change over the gradient accumulation\n # dimension - only after gradAccl steps.\n assert np.allclose(\n anchors[w][batch, 0, :, :, :, :, :],\n anchors[w][batch, replica, :, :, :, :, :],\n )\n\n # Check that the accumulated gradient plus the weights for the current batch\n # equals the weights for the next batch.\n # Batch loop\n for batch in range(anchors[w].shape[0] - 1):\n calc_weight = {}\n # Replica loop.\n for replica in range(anchors[w].shape[2]):\n # For each replica in each batch, take the relevant replica's\n # last weight tensor in the accumulation loop minus\n # the sum of the accumulated gradients across replicas\n calc_weight[replica] = anchors[w][\n batch, -1, replica, :, :, :, :\n ] - np.sum(anchors[ACCL][batch, -1, :, :, :, :, :], axis=0)\n # Then compare against the last weight tensor of the next batch,\n # for the relevant replica. 
These should match.\n assert np.allclose(\n calc_weight[replica], anchors[w][batch + 1, -1, replica, :, :, :, :]\n )", "def main():\n # initialize the class labels and set the seed of the pseudorandom\n # number generator so we can reproduce our results\n labels = [\"dog\", \"cat\", \"panda\"]\n np.random.seed(1)\n\n # be * learned * by our model, but for the sake of this example, let's use random values\n W = np.random.randn(3, 3072)\n b = np.random.randn(3)\n\n # load our example image, resize it, and then flatten it into our\n # \"feature vector\" representation\n orig = cv2.imread(\"beagle.png\")\n image = cv2.resize(orig, (32, 32)).flatten()\n\n # compute the output scores by taking the dot product between the\n # weight matrix and image pixels, followed by adding in the b\n scores = W.dot(image) + b\n\n # loop over the scores + labels and display them\n for (label, score) in zip(labels, scores):\n print(\"[INFO] {}: {:.2f}\".format(label, score))\n\n # draw the label with the highest score on the image as our prediction\n cv2.putText(\n orig, \"Label: {}\".format(labels[np.argmax(scores)]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2\n )\n\n # display our input image\n cv2.imshow(\"Image\", orig)\n cv2.waitKey(0)", "def __init__(self, likelihood, model):\n if not isinstance(likelihood, GaussianLikelihood):\n raise RuntimeError(\"Likelihood must be Gaussian for exact inference\")\n super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)", "def loss_random_sampling(anchor, positive, negative, anchor_swap = False, margin = 1.0, loss_type = \"triplet_margin\"):\n\n assert anchor.size() == positive.size(), \"Input sizes between positive and negative must be equal.\"\n assert anchor.size() == negative.size(), \"Input sizes between positive and negative must be equal.\"\n assert anchor.dim() == 2, \"Inputd must be a 2D matrix.\"\n eps = 1e-8\n (pos, d_a_n, d_p_n) = distance_vectors_pairwise(anchor, positive, negative)\n\n # distance based anchor, if anchor swap, get the min(anchor, positive)\n if anchor_swap:\n min_neg = torch.min(d_a_n, d_p_n)\n else:\n min_neg = d_a_n\n\n if loss_type == \"triplet_margin\":\n # the func is (m + D_p - D_n)\n loss = torch.clamp(margin + pos - min_neg, min=0.0)\n elif loss_type == 'softmax':\n # here the output is 2-class log-softmax loss(1/0) from L2Net\n exp_pos = torch.exp(2.0 - pos)\n exp_den = exp_pos + torch.exp(2.0 - min_neg) + eps\n loss = - torch.log( exp_pos / exp_den )\n elif loss_type == 'contrastive':\n loss = torch.clamp(margin - min_neg, min=0.0) + pos\n else: \n print ('Unknown loss type. Try triplet_margin, softmax or contrastive')\n sys.exit(1)\n loss = torch.mean(loss)\n return loss", "def _fit_amoeba(self, kwargs, verbose):\n\n args_init = self._param_class.kwargs_to_args(kwargs)\n\n options = {\n \"adaptive\": True,\n \"fatol\": self._tol_simplex_func,\n \"maxiter\": self._simplex_n_iterations * len(args_init),\n }\n\n method = \"Nelder-Mead\"\n\n if verbose:\n print(\"starting amoeba... 
\")\n\n opt = minimize(\n self.fast_rayshooting.chi_square,\n x0=args_init,\n method=method,\n options=options,\n )\n\n kwargs = self._param_class.args_to_kwargs(opt[\"x\"])\n source_penalty = opt[\"fun\"]\n\n return kwargs, source_penalty", "def align(self, image, landmark_indices, anchor_points, size=96):\n # Detect face in image and find landmarks\n box = self.detect(image)\n landmarks = self.find_landmarks(image, box)\n\n # Select three points in the landmarks(Eyes and nose)\n points_in_image = landmarks[landmark_indices]\n points_in_image = points_in_image.astype('float32')\n # Generate the normalized output size\n output_size = anchor_points * size\n\n # Calculates the 2 \\times 3 matrix of an affine transform\n affine_transf = cv2.getAffineTransform(points_in_image, output_size)\n\n # Transforms the source image using the specified matrix\n transformed_img = cv2.warpAffine(image, affine_transf, (size, size))\n\n return transformed_img", "def apply_deformation(\n points: Tensor,\n nodes: Tensor,\n transforms: Tensor,\n anchor_indices: Tensor,\n anchor_weights: Tensor,\n eps: float = 1e-6,\n) -> Tensor:\n anchor_weights = anchor_weights / (anchor_weights.sum(dim=1, keepdim=True) + eps) # (N, K)\n anchor_masks = torch.ne(anchor_indices, -1) # (N, K)\n p_indices, col_indices = torch.nonzero(anchor_masks, as_tuple=True) # (C), (C)\n n_indices = anchor_indices[p_indices, col_indices] # (C)\n weights = anchor_weights[p_indices, col_indices] # (C)\n sel_points = points[p_indices] # (C, 3)\n sel_nodes = nodes[n_indices] # (C, 3)\n sel_transforms = transforms[n_indices] # (C, 4, 4)\n sel_warped_points = apply_transform(sel_points - sel_nodes, sel_transforms) + sel_nodes # (C, 3)\n sel_warped_points = sel_warped_points * weights.unsqueeze(1) # (C, 3)\n warped_points = torch.zeros_like(points) # (N, 3)\n p_indices = p_indices.unsqueeze(1).expand_as(sel_warped_points) # (C, 3)\n warped_points.scatter_add_(dim=0, index=p_indices, src=sel_warped_points) # (N, 3)\n return warped_points", "def mult_reads_gmm(reads, training_reads, components):\n\n\tprediction_zero_100 = 0\n\tprediction_one_100 = 0\n\tprediction_zero_200 = 0\n\tprediction_one_200 = 0\n\n\tbase_opts = ['A', 'C', 'G', 'T']\n\n\n\tmodel = mixture.GMM(n_components=components, covariance_type='spherical')\n\tnum_reads = len(reads)\n\n\ttraining_reads = [read.get_read().replace('\\'', '') for read in training_reads]\n\n\tread_input = [read.get_read().replace('\\'', '') for read in reads]\n\t# alignment_inputs = []\n\t# alignment_inputs.extend(read.get_alignments())\n\n\t# Generates observations\n\t# bases are converted to their ascii character values\n\tread_list = []\n\tfor read in read_input:\n\t\tread_char = [convert_letter(c) for c in read]\n\t\tread_list.append(read_char)\n\n\tobservations = []\n\t\n\tfor alignment in training_reads:\n\t\talignment_list = [convert_letter(c) for c in alignment] \n\t\tobservations.append( alignment_list )\n\t# for base_index, base in enumerate(read_main):\n\t# \tbase_observations = [ord(base)]\n\t# \tfor alignment in alignments:\n\t# \t\tbase_observations.append(ord(alignment[base_index]))\n\n\t# \tobservations.append(base_observations)\n\n\tmodel.fit(observations)\n\tmeans = np.round(model.means_, 2)\n\tcovars = np.round(model.covars_, 2)\n\tconverted_means = []\n\tfor num_list in means:\n\t\t# convert to nearest acceptable letter\n\t\t#char_means = [chr(int(n)) for n in num_list]\n\t\tchar_means = [convert_to_letter(n) for n in num_list]\n\t\tconverted_means.append(char_means)\n\t\n\tpredictions = 
model.predict(read_list)\n\n\tread_predictions = []\n\tfor index, prediction in enumerate(predictions):\n\t\tmapping = [prediction, reads[index]]\n\t\tread_predictions.append(mapping)\n\t\n\n\tfor read_pr in read_predictions:\n\t\t\n\t\tprediction = read_pr[0]\n\t\t# def filt(x): return x[0] == prediction\n\t\t# matches = filter(filt, read_predictions)\n\t\tpr = prediction\n\t\trps = int(float(read_pr[1].get_position()))\n\t\t# print '\\n'\n\t\t# print prediction\n\t\t# print 'Converted Means: '\n\t\t# print ''.join(converted_means[prediction])\n\t\t# print 'Actual Read'\n\t\t# print read_pr[1].get_read()\n\t\t# print read_pr[1].get_position()\n\t\t# print 'Matches'\n\t\t# for m in matches:\n\t\t# \tprint m[1].get_read() + ' Position: ' + m[1].get_position()\n\t\t# \tm[1].print_read()\n\n\t\tif pr == 0:\n\t\t\tif rps == 100:\n\t\t\t\tprediction_zero_100 = prediction_zero_100 + 1\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprediction_zero_200 = prediction_zero_200 + 1\n\t\t\t\t\n\t\telse:\n\t\t\tif rps == 100:\n\t\t\t\tprediction_one_100 = prediction_one_100 + 1\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprediction_one_200 = prediction_one_200 + 1\n\t\t\t\t\n\n\tprint '\\n-------------Predictions---------------------'\n\tprint 'Prediction: 0 Position: 100 Num: ' + str(prediction_zero_100)\n\tprint 'Prediction: 1 Position: 100 Num: ' + str(prediction_one_100)\n\tprint 'Prediction: 0 Position: 200 Num: ' + str(prediction_zero_200)\n\tprint 'Prediction: 1 Position: 200 Num: ' + str(prediction_one_200)\n\n\tprint '\\n------Means: -----------'\n\tfor mean in converted_means:\n\t\tprint ''.join(mean) \n\n\t# for index, prediction in enumerate(predictions):\n\t# \tprint 'Read: '\n\t# \tprint reads[index].get_read()\n\t# \tprint 'Prediction: '\n\t# \tprint prediction\n\t# \tprint converted_means[prediction]\n\t# \tprint 'Means: '\n\t# \tprint means[prediction]\n\t# \tprint covars[prediction]\n\t# \tprint '----------------------------------------\\n'\n\n\n\t# posteriors = model.predict_proba(read_list)\n\t# print model.get_params(deep=True)\n\t# sample = model.sample()\n\t# print [convert_to_letter(n) for n in sample[0]]", "def __getitem__(self, index):\n vocab = self.vocab\n \n img_name = self.img_list[index]\n targets = self.ann[img_name]['caption']\n img_cats = self.ann[img_name]['label']\n img_labels = self.get_onehot_labels([self.cat2label[a] for a in img_cats]) # re\n\n image = Image.open(os.path.join(self.img_root, img_name+'.jpg')).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n \n if len(targets) > self.num_ann_onebatch:\n # picked_idx = np.random.choice(len(targets), self.num_ann_onebatch, replace=False)\n # picked_idx.sort()\n picked_idx = np.arange(self.num_ann_onebatch)\n targets = [targets[i] for i in picked_idx]\n\n elif len(targets) < self.num_ann_onebatch:\n how_many_left = self.num_ann_onebatch - len(targets)\n picked_idx = []\n while how_many_left > 0:\n k = min(how_many_left, len(targets))\n picked_idx += [a for a in range(k)]\n how_many_left -= k\n # picked_idx = np.random.choice(len(targets), how_many_left, replace=how_many_left>len(targets))\n # picked_idx.sort()\n targets = targets + [targets[i] for i in picked_idx]\n assert(len(targets) == self.num_ann_onebatch)\n\n # Convert caption (string) to word ids.\n captions = []\n for target in targets: \n target = target.replace('.','').replace(', ', ' ')\n tokens = nltk.tokenize.word_tokenize(str(target).lower())\n # caption.append(vocab('<start>')) # we no need to start \n caption = [vocab(token) for token in tokens]\n 
captions.append(caption)\n\n return image, captions, img_labels", "def __getitem__(self, index):\n \"\"\"\n caption_a = item[\"caption\"]\n imageID = item[\"image_id\"]\n\n if self.expanded and index >= self.train_size:\n coco = self.coco_val\n else:\n coco = self.coco\n\n rest_anns = coco.loadAnns([i for i in coco.getAnnIds(imgIds=imageID) if i != item['id']])\n\n if self.args.get(\"two_sentence\", True):\n if random.random() > 0.5:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n while item_b[\"image_id\"] == imageID:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n flag = False\n else:\n item_b = rest_anns[random.randint(0, len(rest_anns) - 1)]\n flag = True\n\n caption_b = item_b[\"caption\"]\n subword_tokens_a = self.tokenizer.tokenize(caption_a)\n subword_tokens_b = self.tokenizer.tokenize(caption_b)\n bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = subword_tokens_b, is_correct=flag, max_seq_length = self.max_seq_length)\n elif not self.args.get(\"no_next_sentence\", False):\n if random.random() < self.args.false_caption_ratio:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n while item_b[\"image_id\"] == imageID:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n flag = False\n else:\n item_b = item\n flag = True\n\n caption_b = item_b[\"caption\"]\n subword_tokens_b = self.tokenizer.tokenize(caption_b)\n bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=flag, max_seq_length = self.max_seq_length)\n else:\n caption_b = item[\"caption\"]\n subword_tokens_b = self.tokenizer.tokenize(caption_b)\n bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=None, max_seq_length = self.max_seq_length)\n\n bert_feature = InputFeatures.convert_one_example_to_features_pretraining(\n example = bert_example,\n tokenizer=self.tokenizer,\n probability = self.masked_lm_prob)\n bert_feature.insert_field_into_dict(sample)\n \"\"\"\n\n if (self.image_feature_type == 'kinetics-r2c'):\n return self.__getitem_detector__(index)\n else:\n return self.__getitem_image__(index)", "def __init__(self, nheads, d_model):\n super(MultiheadAttention, self).__init__()\n assert d_model % nheads == 0\n self.d_head = d_model // nheads\n self.nheads = nheads\n self.Q_fc = nn.Linear(d_model, d_model, bias=False)\n self.K_fc = nn.Linear(d_model, d_model, bias=False)\n self.V_fc = nn.Linear(d_model, d_model, bias=False)\n self.output_fc = nn.Linear(d_model, d_model, bias=False)\n self.attn = None", "def _drawOneHMM(d, ad, xLoc, yLoc, modelSizeX, modelSizeY):\n \n image = [([0] * modelSizeX) for i in range(modelSizeY)]\n \n numImages = len(ad)\n if numImages > 100:\n numImages = 100\n \n for i in range(numImages):\n #Sum all positions\n for j in range(len(ad[i])):\n for k in range(modelSizeX):\n image[j][k] += (int(ad[i][j]) & 2**k) >> k\n \n \n for j in range(len(image)):\n for k in range(len(image[0])):\n image[j][k] /= (numImages * 1.0)\n \n #Draw the image.\n for j in range(len(image)):\n for k in range(len(image[0])):\n #c = getColor(image[j][k])\n d.point((xLoc + k, yLoc + j), \\\n fill = (255.0 * image[j][len(image[0]) - 1 - k], \\\n 255.0 * image[j][len(image[0]) - 1 - k], \\\n 255.0 * image[j][len(image[0]) - 1 - k]))", "def create_generators(cfg, backbone):\n if cfg.anchor_params:\n if 'small' in cfg.anchor_params:\n anchor_params = AnchorParameters.small\n else:\n anchor_params = None\n else:\n anchor_params = None\n\n common_args 
= {\n 'batch_size': cfg.batchsize,\n 'config': None,\n 'image_min_side': cfg.image_size[0],\n 'image_max_side': cfg.image_size[1],\n 'filter_annotations_enabled': False,\n 'preprocess_image': backbone.preprocess_image,\n 'normalize_radar': cfg.normalize_radar,\n 'camera_dropout': cfg.dropout_image,\n 'radar_dropout': cfg.dropout_radar,\n 'channels': cfg.channels,\n 'distance': cfg.distance_detection,\n 'sample_selection': cfg.sample_selection,\n 'only_radar_annotated': cfg.only_radar_annotated,\n 'n_sweeps': cfg.n_sweeps,\n 'noise_filter': cfg.noise_filter_cfg,\n 'noise_filter_threshold': cfg.noise_filter_threshold,\n 'noisy_image_method': cfg.noisy_image_method,\n 'noise_factor': cfg.noise_factor,\n 'perfect_noise_filter': cfg.noise_filter_perfect,\n 'radar_projection_height': cfg.radar_projection_height,\n 'noise_category_selection': None if cfg.class_weights is None else cfg.class_weights.keys(),\n 'inference': cfg.inference,\n 'anchor_params': anchor_params,\n }\n\n # create random transform generator for augmenting training data\n if cfg.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.0,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n category_mapping = cfg.category_mapping\n\n if 'nuscenes' in cfg.data_set:\n # import here to prevent unnecessary dependency on nuscenes\n from crfnet.data_processing.generator.nuscenes_generator import NuscenesGenerator\n from nuscenes.nuscenes import NuScenes\n\n if 'mini' in cfg.data_set:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n else:\n try:\n nusc = NuScenes(version='v1.0-trainval', dataroot=cfg.data_path, verbose=True)\n except ValueError:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n\n\n if 'debug' in cfg.scene_selection or 'mini' in cfg.data_set:\n scenes = Scenes.debug\n else:\n scenes = Scenes.default\n\n train_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.train,\n transform_generator=transform_generator,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n shuffle_groups=True,\n group_method='random',\n **common_args\n )\n\n # no dropouts in validation\n common_args['camera_dropout'] = 0\n common_args['radar_dropout'] = 0\n\n validation_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.val,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_night_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_night,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_rain_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_rain,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n return train_generator, validation_generator, test_generator, test_night_generator, test_rain_generator\n else:\n raise 
ValueError('Invalid data type received: {}'.format(cfg.data_set))", "def bias_model_qso(z):\n alpha = 0.278\n beta = 2.393\n return alpha * ((1+z)**2 - 6.565) + beta", "def yolo_head_sigmoid(features, anchors, num_classes, input_shape):\n\n _, features, box_xy, box_wh = yolo_head_base(features, anchors, num_classes, input_shape)\n\n box_confidence = K.sigmoid(features[..., 4:5])\n box_class_probabilities = K.sigmoid(features[..., 5:])\n\n return box_xy, box_wh, box_confidence, box_class_probabilities", "def res_aamfb_50(imagenet_name=False, **kwargs):\n if imagenet_name:\n imagenet_name = 'resnet50'\n else:\n imagenet_name = None\n model = res_AAMFB(Bottleneck, [3, 4, 6, 3], **kwargs)\n model.load_pretrained_weights(imagenet_name)\n return model", "def predictor(path):\n # get keypoints from the image in a DF\n TEST_keypoints = []\n path = cv2.cvtColor(path, cv2.COLOR_BGR2RGB)\n img = movenet_inference_flat_v10(hub_model, path)\n TEST_keypoints.append(img)\n TEST_keypoints_df = pd.DataFrame(TEST_keypoints)\n\n # Rename columns in the DataFrames according to the values\n columns = []\n for point in kp_descriptions:\n for value in ('y', 'x', 'score'):\n columns.append(f'{point}_{value}')\n\n TEST_keypoints_df.columns = columns\n \n # add additional positional features\n TEST_keypoints_df = add_pos_features(TEST_keypoints_df, drop_scores=True)\n # predict the asana\n prediction_existing = model_fl.predict(TEST_keypoints_df)\n # initialize the predicted_asana to 107 (no asan found)\n predicted_asana = 107\n\n # assign the precited asana if accuracy more than threshold (12.5%)\n for i in range(1):\n mx = 0\n mx_label = -1\n for j in range(107):\n if(prediction_existing[i, j] > mx):\n mx_label = j\n mx = prediction_existing[i, j]\n predicted_asana = mx_label\n predicted_accuracy = prediction_existing[0, mx_label]\n if(predicted_accuracy < 0.125):\n predicted_asana = 107\n\n # print(predicted_asana)\n \n # find label from the json\n a = inv_map[str(predicted_asana)]\n # b = \"null\"\n\n print(\"predicted pose --> \", a)\n print(\"confidence = \", predicted_accuracy)\n # print(\"actual pose -->\", b)\n return a, img", "def res_aamfb_34(imagenet_name=False, **kwargs):\n if imagenet_name:\n imagenet_name = 'resnet34'\n else:\n imagenet_name = None\n model = res_AAMFB(BasicBlock, [3, 4, 6, 3], **kwargs)\n model.load_pretrained_weights(imagenet_name)\n return model" ]
[ "0.6148643", "0.5812919", "0.5794156", "0.54624885", "0.53823245", "0.5287905", "0.52859586", "0.5282076", "0.52552813", "0.5201943", "0.51982266", "0.5182037", "0.51819533", "0.515083", "0.5144859", "0.5127486", "0.5112405", "0.509996", "0.5075668", "0.50655454", "0.5056296", "0.50549775", "0.5051599", "0.50323814", "0.5027042", "0.5014503", "0.5008731", "0.50082034", "0.49899963", "0.49871254", "0.49779287", "0.49509516", "0.49437767", "0.49372688", "0.4936293", "0.4936109", "0.4935963", "0.49350965", "0.49296018", "0.49126855", "0.49038562", "0.49024415", "0.4885883", "0.4882145", "0.48756415", "0.4867796", "0.4866264", "0.4847417", "0.48356158", "0.48341137", "0.48322093", "0.4829396", "0.48290098", "0.48284975", "0.4821248", "0.48153788", "0.48127723", "0.48116112", "0.48087457", "0.47959903", "0.4794919", "0.4793187", "0.47930676", "0.47895014", "0.47870466", "0.47833702", "0.47831795", "0.47728667", "0.4770975", "0.47701982", "0.47695625", "0.47601858", "0.47591466", "0.474555", "0.474539", "0.47432736", "0.47409475", "0.47284803", "0.47260648", "0.47225517", "0.4719098", "0.4718302", "0.4715433", "0.47152662", "0.4714641", "0.4706368", "0.4705684", "0.46987033", "0.46955654", "0.4687744", "0.4679993", "0.46778053", "0.4677499", "0.46756527", "0.467557", "0.46747544", "0.46694595", "0.4668529", "0.46665472", "0.46639982" ]
0.5076744
18
r"""Selfattention model variant from `"Learned Image Compression with Discretized Gaussian Mixture Likelihoods and Attention Modules"
def cheng2020_attn(quality, metric="mse", pretrained=False, progress=True, **kwargs):
    if metric not in ("mse", "ms-ssim"):
        raise ValueError(f'Invalid metric "{metric}"')

    if quality < 1 or quality > 6:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')

    return _load_model(
        "cheng2020-attn", metric, quality, pretrained, progress, **kwargs
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def self_attention_model(self):\n inputs = self.prepare_inputs()\n \n features = self.prepare_features(inputs)\n support_history_context_concat_emb = features['support_history_context_concat_emb']\n support_link_info_concat_emb = features['support_link_info_concat_emb']\n support_start_node_info_concat_emb = features['support_start_node_info_concat_emb']\n support_end_node_info_concat_emb = features['support_end_node_info_concat_emb']\n support_future_context_concat_emb = features['support_future_context_concat_emb']\n support_neighbor_link_info_concat_emb = features['support_neighbor_link_info_concat_emb']\n support_neighbor_start_node_info_concat_emb = features['support_neighbor_start_node_info_concat_emb']\n support_neighbor_end_node_info_concat_emb = features['support_neighbor_end_node_info_concat_emb']\n support_neighbor_link_state_concat_emb = features['support_neighbor_link_state_concat_emb']\n support_y_embed = features['support_duration_delta_concat_emb']\n\n query_history_context_concat_emb = features['query_history_context_concat_emb']\n query_link_info_concat_emb = features['query_link_info_concat_emb']\n query_start_node_info_concat_emb = features['query_start_node_info_concat_emb']\n query_end_node_info_concat_emb = features['query_end_node_info_concat_emb']\n query_future_context_concat_emb = features['query_future_context_concat_emb']\n query_neighbor_link_info_concat_emb = features['query_neighbor_link_info_concat_emb']\n query_neighbor_start_node_info_concat_emb = features['query_neighbor_start_node_info_concat_emb']\n query_neighbor_end_node_info_concat_emb = features['query_neighbor_end_node_info_concat_emb']\n query_neighbor_link_state_concat_emb = features['query_neighbor_link_state_concat_emb']\n \n support_x = self.constgat({\n 'history_context': support_history_context_concat_emb,\n 'link_info': support_link_info_concat_emb,\n 'start_node_info': support_start_node_info_concat_emb,\n 'end_node_info': support_end_node_info_concat_emb,\n 'future_context': support_future_context_concat_emb,\n 'neighbor_link_info': support_neighbor_link_info_concat_emb,\n 'neighbor_start_node_info': support_neighbor_start_node_info_concat_emb,\n 'neighbor_end_node_info': support_neighbor_end_node_info_concat_emb,\n 'neighbor_link_state': support_neighbor_link_state_concat_emb})\n\n query_x = self.constgat({\n 'history_context': query_history_context_concat_emb,\n 'link_info': query_link_info_concat_emb,\n 'start_node_info': query_start_node_info_concat_emb,\n 'end_node_info': query_end_node_info_concat_emb,\n 'future_context': query_future_context_concat_emb,\n 'neighbor_link_info': query_neighbor_link_info_concat_emb,\n 'neighbor_start_node_info': query_neighbor_start_node_info_concat_emb,\n 'neighbor_end_node_info': query_neighbor_end_node_info_concat_emb,\n 'neighbor_link_state': query_neighbor_link_state_concat_emb})\n query_x = query_x + layers.reduce_sum(inputs['query_mask']) * 0.0 + layers.reduce_sum(inputs['support_mask']) * 0.0 # IMPORTANT: for save_inference_model\n\n def forward_attention(indicator, support_x, support_y_embed, support_mask, query_x, query_y, query_mask):\n \"\"\"\n support_indicator: length = support_len\n if attention(support, query), indicator = 0\n if attention(support, support), indicator = 1\n \"\"\"\n support_y_embed = support_y_embed * support_mask\n support_xy = layers.concat([support_x, support_y_embed, indicator], axis=1)\n\n pad_value = layers.assign(input=numpy.array([0.0], dtype=numpy.float32))\n support_pad, support_len = 
layers.sequence_pad(support_xy, pad_value=pad_value)\n query_pad, query_len = layers.sequence_pad(query_x, pad_value=pad_value)\n\n attention = self.attention(query_pad, support_pad, support_pad, self.hidden_dim, 'meta')\n attention = layers.sequence_unpad(attention, length=query_len)\n pred_input = layers.concat([attention, query_x], axis=1)\n\n pred = self.prepare_preds_with_name(pred_input, 'out_pred')\n label = layers.cast(query_y, dtype='float32')\n label = layers.scale(label, scale=0.01)\n\n loss = layers.huber_loss(pred, label, 1.0) * query_mask\n loss = layers.mean(loss)\n return pred, label, loss\n\n indicator = support_y_embed * 0.0\n pred, label, loss1 = forward_attention(\n indicator, support_x, support_y_embed, inputs['support_mask'], \n query_x, inputs['query_duration_delta'], 1.0)\n indicator = support_y_embed * 1.0\n _, _, loss2 = forward_attention(\n indicator, support_x, support_y_embed, inputs['support_mask'], \n support_x, inputs['support_duration_delta'] - 120, (inputs['support_mask'] * (-1.0) + 1))\n loss = loss1 + loss2\n return pred, label, loss", "def __init__(self, cfg):\n super(MHBCoAtt, self).__init__()\n self.cfg = cfg\n # word embedding: q_vocab_size, 1024\n self.word_embedding = nn.Embedding(cfg.q_vocab_size, cfg.emb_dim)\n # LSTM\n if cfg.glove:\n self.lstm = nn.LSTM(input_size=cfg.emb_dim*2,\n hidden_size=cfg.hidden_dim,\n num_layers=cfg.num_layers,\n batch_first=True)\n else:\n self.lstm = nn.LSTM(input_size=cfg.emb_dim,\n hidden_size=cfg.hidden_dim,\n num_layers=cfg.num_layers,\n batch_first=True)\n\n self.dropout_l = nn.Dropout(p = 0.3)\n # question attention\n self.ques_att_conv1 = nn.Conv2d(cfg.hidden_dim, 512, [1,1])\n self.ques_att_conv2 = nn.Conv2d(512, 2, [1,1])\n\n # question attentive feature fuse with image feature, according to paper: k * o = 5000, k = 5\n self.ques_proj1 = nn.Linear(2*cfg.hidden_dim, 5000)\n self.img_conv1d = nn.Conv2d(cfg.img_feature_channel, 5000, [1, 1])\n self.dropout_m = nn.Dropout(p = 0.1)\n\n # co-attention conv layers\n self.co_att_conv1 = nn.Conv2d(1000, 512, [1,1])\n self.co_att_conv2 = nn.Conv2d(512, 2, [1,1])\n\n # co_attentive feature fuse with question attentive feature\n self.ques_proj2 = nn.Linear(2*cfg.hidden_dim, 5000)\n self.ques_proj3 = nn.Linear(2*cfg.hidden_dim, 5000)\n self.img_proj2 = nn.Linear(2*cfg.img_feature_channel, 5000)\n self.img_proj3 = nn.Linear(2*cfg.img_feature_channel, 5000)\n\n # prediction fully connected layer\n self.linear_pred = nn.Linear(2000, cfg.a_vocab_size)", "def _add_bert_self_attention_layer(model_1, model_2):\n\n class Config:\n pass\n\n config = Config()\n config.hidden_size = model_1.all_head_size + model_2.all_head_size\n config.num_attention_heads = (\n model_1.num_attention_heads + model_2.num_attention_heads\n )\n config.output_attentions = model_1.output_attentions\n config.attention_probs_dropout_prob = model_1.dropout.p\n\n return BertSelfAttention(config)", "def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)", "def build_model_multihead_attention_multiscaleCNN4_covermore(self,\n dim_attention,headnum,\n embedding_vec,\n load_weights=False, weight_dir=None,\n nb_filters=32,filters_length1=1,\n filters_length2=5,\n filters_length3=10,\n pooling_size=3,\n drop_input=0,\n drop_cnn=0.2,\n drop_flat=0,\n W1_regularizer=0.005,\n W2_regularizer=0.005,\n Att_regularizer_weight=0.0005,\n BatchNorm=False,\n fc_dim = 50,\n fcnum=0,\n posembed=False,\n pos_dmodel=40,\n pos_nwaves = 20,\n posmod = 'concat',\n regularfun=1,\n huber_delta=1,\n 
activation='gelu',\n activationlast='gelu',\n add_avgpooling = False,\n poolingmod=1,\n normalizeatt=False,\n regressionmodel=False,\n attmod = \"softmax\",\n sharp_beta=1,\n lr = 0.001 \n ):\n ###print('Advanced Masking')\n def mask_func(x):\n return x[0] * x[1]\n \n ###print(posembed)\n ###print(posmod)\n input = Input(shape=(self.max_len,), dtype='int8')\n input_mask = Input(shape=([int(self.max_len/pooling_size), 1]), dtype='float32')\n embedding_layer = Embedding(len(embedding_vec), len(embedding_vec[0]), weights=[embedding_vec],\n input_length=self.max_len,\n trainable=False)\n embedding_output = Dropout(drop_input)(embedding_layer(input)) #layer2\n if 'gelu' in activation:\n activationfun=gelu\n else:\n activationfun = 'relu'\n \n if 'gelu' in activationlast:\n activationlastfun = gelu\n else:\n activationlastfun='relu'\n \n ###print(activationfun)\n ###print(activationlastfun)\n with tf.name_scope('first_cnn'):\n first_cnn = Convolution1D(nb_filters, filters_length1, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationfun, use_bias=False,name='CNN1')(embedding_output) #layer3\n first_cnn2 = Convolution1D(int(nb_filters/2), filters_length1, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationlastfun, use_bias=False)(first_cnn) #layer5\n second_cnn = Convolution1D(nb_filters, filters_length2, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationfun, use_bias=False,name='CNN2')(embedding_output) #layer4\n second_cnn2 = Convolution1D(int(nb_filters/2), filters_length2, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationlastfun, use_bias=False)(second_cnn)\n third_cnn = Convolution1D(int(nb_filters/2), filters_length3, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationfun, use_bias=False,name='CNN3')(embedding_output)\n \n third_cnn2 = Convolution1D(int(nb_filters/2), filters_length3, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationlastfun, use_bias=False)(third_cnn)\n if BatchNorm:\n first_cnn2 = BatchNormalization()(first_cnn2)\n second_cnn2 = BatchNormalization()(second_cnn2)\n third_cnn2 = BatchNormalization()(third_cnn2)\n \n if not add_avgpooling:\n if poolingmod == 1:\n pooling_layer = MaxPooling1D(pool_length=pooling_size, stride=pooling_size)\n else:\n pooling_layer = AveragePooling1D(pool_length=pooling_size, stride=pooling_size)\n \n cnn_output1 = Dropout(drop_cnn)(pooling_layer(first_cnn2))\n cnn_output2 = Dropout(drop_cnn)(pooling_layer(second_cnn2))\n cnn_output3 = Dropout(drop_cnn)(pooling_layer(third_cnn2))\n else:\n first_cnn2_max=MaxPooling1D(pool_length=pooling_size, stride=pooling_size)(first_cnn2)\n first_cnn2_avg=AveragePooling1D(pool_length=pooling_size, stride=pooling_size)(first_cnn2)\n cnn_output1 = Dropout(drop_cnn)(concatenate([first_cnn2_max,first_cnn2_avg],axis=-1))\n second_cnn2_max=MaxPooling1D(pool_length=pooling_size, stride=pooling_size)(second_cnn2)\n second_cnn2_avg=AveragePooling1D(pool_length=pooling_size, stride=pooling_size)(second_cnn2)\n cnn_output2 = Dropout(drop_cnn)(concatenate([second_cnn2_max,second_cnn2_avg],axis=-1))\n third_cnn2_max=MaxPooling1D(pool_length=pooling_size, stride=pooling_size)(third_cnn2)\n third_cnn2_avg=AveragePooling1D(pool_length=pooling_size, stride=pooling_size)(third_cnn2)\n cnn_output3 = Dropout(drop_cnn)(concatenate([third_cnn2_max,third_cnn2_avg],axis=-1))\n \n \n \n if posembed:\n 
##print(posmod)\n from position_embedding import PositionEmbedding\n if posmod == 'concat':\n pos_emb1 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=pos_nwaves, d_model=pos_dmodel,name='pos_emb1')(cnn_output1)\n cnn_output1 = concatenate([cnn_output1, pos_emb1], axis=-1)\n pos_emb2 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=pos_nwaves, d_model=pos_dmodel,name='pos_emb2')(cnn_output2)\n cnn_output2 = concatenate([cnn_output2, pos_emb2], axis=-1)\n pos_emb3 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=pos_nwaves, d_model=pos_dmodel,name='pos_emb3')(cnn_output3)\n cnn_output3 = concatenate([cnn_output3, pos_emb3], axis=-1)\n else:\n ##print(\"yes add posmod\")\n pos_emb1 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=int(int_shape(cnn_output1)[-1]/2), d_model=int_shape(cnn_output1)[-1],name='pos_emb1')(cnn_output1)\n cnn_output1 = Add()([cnn_output1, pos_emb1])\n pos_emb2 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=int(int_shape(cnn_output2)[-1]/2), d_model=int_shape(cnn_output2)[-1],name='pos_emb2')(cnn_output2)\n cnn_output2 = Add()([cnn_output2, pos_emb2])\n pos_emb3 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=int(int_shape(cnn_output3)[-1]/2), d_model=int_shape(cnn_output3)[-1],name='pos_emb3')(cnn_output3)\n cnn_output3 = Add()([cnn_output3, pos_emb3])\n \n mask_input1 = []\n mask_input1.append(cnn_output1)\n mask_input1.append(input_mask)\n cnn_mask_output1 = Lambda(mask_func)(mask_input1)\n del mask_input1\n mask_input2 = []\n mask_input2.append(cnn_output2)\n mask_input2.append(input_mask)\n cnn_mask_output2 = Lambda(mask_func)(mask_input2)\n del mask_input2\n mask_input3 = []\n mask_input3.append(cnn_output3)\n mask_input3.append(input_mask)\n cnn_mask_output3 = Lambda(mask_func)(mask_input3)\n del mask_input3\n \n if regularfun==1:\n regularizerfunction_W1 = regularizers.l1(W1_regularizer)\n regularizerfunction_W2 = regularizers.l1(W2_regularizer)\n elif regularfun==2:\n regularizerfunction_W1 = regularizers.l2(W1_regularizer)\n regularizerfunction_W2 = regularizers.l2(W2_regularizer)\n elif regularfun ==3:\n regularizerfunction_W1 = smoothL1(W1_regularizer,huber_delta)\n regularizerfunction_W2 = smoothL1(W2_regularizer,huber_delta)\n \n with tf.name_scope('multihead_attention'):\n att1,att1_A = Attention_mask(hidden=cnn_output1.get_shape()[-1].value, da=dim_attention, r=headnum, init='glorot_uniform', activation='tanh',\n W1_regularizer=regularizerfunction_W1,\n W2_regularizer=regularizerfunction_W2,\n W1_constraint=None, W2_constraint=None, return_attention=True,\n attention_regularizer_weight=Att_regularizer_weight,normalize=normalizeatt,attmod=attmod,sharp_beta=sharp_beta,name=\"att1\")(concatenate([cnn_mask_output1, input_mask]))#-5 layer\n \n att2,att2_A = Attention_mask(hidden=cnn_output1.get_shape()[-1].value, da=dim_attention, r=headnum, init='glorot_uniform', activation='tanh',\n W1_regularizer=regularizerfunction_W1,\n W2_regularizer=regularizerfunction_W2,\n W1_constraint=None, W2_constraint=None, return_attention=True,\n attention_regularizer_weight=Att_regularizer_weight,normalize=normalizeatt,attmod=attmod,sharp_beta=sharp_beta,name=\"att2\")(concatenate([cnn_mask_output2, input_mask])) #-4 layer\n \n att3,att3_A = Attention_mask(hidden=cnn_output1.get_shape()[-1].value, da=dim_attention, r=headnum, init='glorot_uniform', activation='tanh',\n W1_regularizer=regularizerfunction_W1,\n W2_regularizer=regularizerfunction_W2,\n 
W1_constraint=None, W2_constraint=None, return_attention=True,\n attention_regularizer_weight=Att_regularizer_weight,normalize=normalizeatt,attmod=attmod,sharp_beta=sharp_beta,name=\"att3\")(concatenate([cnn_mask_output3, input_mask])) #-3 layer\n \n if BatchNorm:\n att1 = BatchNormalization()(att1)\n att2 = BatchNormalization()(att2)\n att3 = BatchNormalization()(att3)\n \n \n output = Dropout(drop_flat)(Flatten()(concatenate([att1,att2,att3]))) #-2 layer\n \n fc = output\n for _ in range(fcnum):\n fc = Dense(fc_dim,activation='relu')(fc)\n fc = Dropout(drop_flat)(fc)\n \n with tf.name_scope(''):\n if regressionmodel:\n preds = Dense(self.nb_classes,activation='softmax')(fc) #-1 layer\n else:\n preds = Dense(self.nb_classes,activation='sigmoid')(fc) #-1 layer\n \n self.model = Model(inputs=[input,input_mask], outputs=preds)\n from keras import optimizers\n # optim = optimizers.RMSprop()\n optim = optimizers.Adam(lr=lr, decay=5e-5) #The paper uses a decay rate alpha = alpha/sqrt(t) updted each epoch (t) for the logistic regression demonstration.\n #optim = optimizers.nadam()\n #optim = RAdam()\n if regressionmodel:\n self.model.compile(loss='kld',optimizer=optim,metrics=['acc'])\n else:\n self.model.compile(loss='binary_crossentropy',optimizer=optim,metrics=['binary_accuracy','categorical_accuracy'])\n \n \n \n if load_weights:\n self.model.load_weights(weight_dir)\n \n self.is_built = True\n self.bn = False\n self.model.summary()", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n 
lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def __init__(self, emb_size: int = 768, num_heads: int = 12, dropout: float = 0.1):\n super().__init__()\n self.emb_size = emb_size\n self.num_heads = num_heads\n\n # Check \"head dim\" = number of features per head (d_k).\n self.head_dim = emb_size // num_heads\n assert self.head_dim * num_heads == self.emb_size, \"emb_size must be divisible by num_heads\"\n # Calculate scaling factor.\n self.scale = 1 / (self.head_dim ** 0.5)\n\n # V1: in vanilla self-attention Q,K,V are square matrices.\n # self.keys = nn.Linear(emb_size, emb_size)\n # self.queries = nn.Linear(emb_size, emb_size)\n # self.values = nn.Linear(emb_size, emb_size)\n \n # V2: single layer with emb_size, split into num (heads * head_dim) * 3 (Q,K,V).\n self.qkv = nn.Linear(emb_size, emb_size * 3)\n\n # Attention dropout.\n self.att_drop = nn.Dropout(dropout)\n self.projection = nn.Linear(emb_size, emb_size)", "def __init__(self, **config):\n super(CNN, self).__init__()\n in_channel = [26] + config['cnn_target_filters']\n kernels = config['cnn_target_kernels']\n self.layer_size = len(config['cnn_target_filters'])\n self.visual_attention=config['visual_attention']\n self.concatenation=config['concatenation']\n self.convs = nn.ModuleList([nn.Conv1d(in_channels=in_channel[i],\n out_channels=in_channel[i + 1],\n kernel_size=kernels[i]) for i in range(self.layer_size)])\n self.convs = self.convs.float()\n self.attention = config['attention']\n protein_size = self.simulate_output((26, 1000))\n self.fc = nn.Linear(protein_size, config['hidden_dim_protein'])\n self.Attention=Attention(**config)", "def __init__(self, nheads, d_model):\n super(MultiheadAttention, self).__init__()\n assert d_model % nheads == 0\n self.d_head = d_model // nheads\n self.nheads = nheads\n self.Q_fc = nn.Linear(d_model, d_model, bias=False)\n self.K_fc = nn.Linear(d_model, d_model, bias=False)\n self.V_fc = nn.Linear(d_model, d_model, bias=False)\n self.output_fc = nn.Linear(d_model, d_model, bias=False)\n self.attn = None", "def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):\n super().__init__()\n if d_model % n_heads != 0:\n raise ValueError(\"d_model must be divisible by n_heads, but got {} and {}\".format(d_model, n_heads))\n _d_per_head = d_model // n_heads\n # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation\n if not _is_power_of_2(_d_per_head):\n warnings.warn(\n \"You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 \"\n \"which is more efficient in our CUDA implementation.\"\n )\n self.im2col_step = 64\n ###\n self.d_model = d_model\n self.n_levels = n_levels\n self.n_heads = n_heads\n self.n_points = n_points\n self.sampling_offsets = nn.Dense(d_model, n_heads * n_levels * n_points * 2)\n self.attention_weights = nn.Dense(d_model, n_heads * n_levels * n_points)\n self.value_proj = nn.Dense(d_model, d_model)\n self.output_proj = nn.Dense(d_model, d_model)", "def _Attention(self, name, 
is_causal=True):\n p = self.params\n tr_atten_p = TransformerAttentionLayer.Params().Set(\n name='transformer_atten',\n input_dim=p.model_dim,\n hidden_dim=p.attention_hidden_dim or p.model_dim,\n is_masked=is_causal,\n num_heads=p.num_heads,\n residual_dropout_prob=p.residual_dropout_prob,\n atten_dropout_prob=p.atten_dropout_prob,\n fprop_dtype=p.fprop_dtype,\n add_unnormalized_input=p.selfatten_add_unnormalized_input,\n )\n tr_atten_p.atten_tpl.use_bias = p.use_bias\n tr_atten_p.atten_tpl.enable_value_proj = p.selfatten_enable_value_proj\n tr_atten_p.atten_tpl.enable_query_scale = p.enable_query_scale\n tr_atten_p.atten_tpl.enable_per_dim_scale = p.enable_per_dim_scale\n tr_atten_p.atten_tpl.device_mesh = p.device_mesh\n tr_atten_p.atten_tpl.weight_split_dims_mapping = (\n p.weight_split_dims_mapping.dnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.blnh = (\n p.activation_split_dims_mapping.blnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.bld = (\n p.activation_split_dims_mapping.bld)\n if p.deterministic_dropout:\n tr_atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n tr_atten_p.atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n\n return self._Graph(\n name,\n ['i'], # input NestedMap with {vec, paddings}\n ['o'], # output NestedMap with {vec, paddings}\n ('i.vec->split_i',\n self.MeshSplit('input_split', p.activation_split_dims_mapping.bld)),\n ('split_i,split_i,i.paddings->o.vec,unused_prob', tr_atten_p),\n ('i.paddings->o.paddings', self._Id('id')))", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = 
spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def demo(image, name):\n\n # Log.set_log_max_depth(5)\n\n image = normalise(image.astype(np.float32))\n # noisy = add_noise(image, intensity=None, variance=0.1, sap=0, clip=False)\n noisy = random_noise(image, mode=\"gaussian\", var=0.1, seed=0, clip=False)\n noisier = random_noise(noisy, mode=\"gaussian\", var=0.1, seed=100, clip=False)\n\n generator = StandardFeatureGenerator(\n include_corner_features=True,\n include_scale_one=True,\n include_fine_features=True,\n include_spatial_features=True,\n )\n regressor = CBRegressor(\n patience=16, loss='l1', learning_rate=0.002, max_num_estimators=4096\n )\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy, jinv=True)\n n2s_denoised = it.translate(noisy)\n\n it.exclude_center_feature = False\n it.train(noisier, noisy, jinv=False)\n denoised = it.translate(noisy)\n denoised_corrected = 2 * denoised - noisy\n\n # denoised2 = it.translate(it.translate(it.translate(denoised)))\n denoised2 = it.translate(denoised)\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n n2s_denoised = numpy.clip(n2s_denoised, 0, 1)\n denoised_corrected = numpy.clip(denoised_corrected, 0, 1)\n denoised2 = numpy.clip(denoised2, 0, 1)\n\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n\n psnr_n2s_denoised = psnr(image, n2s_denoised)\n ssim_n2s_denoised = ssim(image, n2s_denoised)\n\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n\n psnr_denoised_corrected = psnr(image, denoised_corrected)\n ssim_denoised_corrected = ssim(image, denoised_corrected)\n\n psnr_denoised2 = psnr(image, denoised2)\n ssim_denoised2 = ssim(image, denoised2)\n\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\n \"denoised (classic_denoisers) :\",\n psnr_n2s_denoised,\n ssim_n2s_denoised,\n )\n print(\"denoised (noiser2noise) :\", psnr_denoised, ssim_denoised)\n print(\n \"denoised (noiser2noise corrected) :\",\n psnr_denoised_corrected,\n ssim_denoised_corrected,\n )\n print(\"denoised (x2) :\", psnr_denoised2, ssim_denoised2)\n\n Log.enable_output = False\n denoised_images = []\n for i in range(1, 32):\n psnr_denoised = psnr(image, 
numpy.clip(denoised, 0, 1))\n ssim_denoised = ssim(image, numpy.clip(denoised, 0, 1))\n psnr_sslos = psnr(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n ssim_sslos = ssim(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n print(f\"x{i} :\", psnr_sslos, ssim_sslos, psnr_denoised, ssim_denoised)\n\n denoised_images.append(numpy.clip(denoised, 0, 1))\n denoised = it.translate(denoised)\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(image, name='image')\n viewer.add_image(noisy, name='noisy')\n viewer.add_image(noisier, name='noisier')\n viewer.add_image(n2s_denoised, name='denoised (classic_denoisers)')\n viewer.add_image(denoised, name='denoised (noiser3noise)')\n viewer.add_image(denoised_corrected, name='denoised (noiser3noise corrected)')\n viewer.add_image(numpy.stack(denoised_images), name=f'denoised images')", "def forward_train(self, img, img_metas, **kwargs):\n labels = {}\n labels['trans_inv'] = kwargs['trans_inv']\n labels['intrinsic_param'] = kwargs['intrinsic_param']\n labels['joint_root'] = kwargs['joint_root']\n labels['depth_factor'] = kwargs['depth_factor']\n labels['target_uvd_29'] = kwargs['target_uvd_29']\n labels['target_xyz_24'] = kwargs['target_xyz_24']\n labels['target_weight_24'] = kwargs['target_weight_24']\n labels['target_weight_29'] = kwargs['target_weight_29']\n labels['target_xyz_17'] = kwargs['target_xyz_17']\n labels['target_weight_17'] = kwargs['target_weight_17']\n labels['target_theta'] = kwargs['target_theta']\n labels['target_beta'] = kwargs['target_beta']\n labels['target_smpl_weight'] = kwargs['target_smpl_weight']\n labels['target_theta_weight'] = kwargs['target_theta_weight']\n labels['target_twist'] = kwargs['target_twist']\n labels['target_twist_weight'] = kwargs['target_twist_weight']\n # flip_output = kwargs.pop('is_flipped', None)\n\n for k, _ in labels.items():\n labels[k] = labels[k].cuda()\n\n trans_inv = labels.pop('trans_inv')\n intrinsic_param = labels.pop('intrinsic_param')\n joint_root = labels.pop('joint_root')\n depth_factor = labels.pop('depth_factor')\n\n if self.backbone is not None:\n img = img.cuda().requires_grad_()\n features = self.backbone(img)\n features = features[0]\n else:\n features = img['features']\n\n if self.neck is not None:\n features = self.neck(features)\n\n predictions = self.head(features, trans_inv, intrinsic_param,\n joint_root, depth_factor, self.smpl)\n\n losses = self.compute_losses(predictions, labels)\n\n return losses", "def __init__(self, embed_size):\n super(ImgAttentionEncoder, self).__init__()\n vggnet_feat = models.vgg19(pretrained=True).features\n modules = list(vggnet_feat.children())[:-2]\n self.cnn = nn.Sequential(*modules)\n self.fc = nn.Sequential(nn.Linear(self.cnn[-3].out_channels, embed_size),\n nn.Tanh()) # feature vector of image", "def _attention(self, inputs):\n attn_weights = K.batch_dot(x=inputs,\n y=K.permute_dimensions(inputs,\n pattern=(0, 2, 1)))\n return K.permute_dimensions(attn_weights, (0, 2, 1))", "def __init__(self, generator, tgt_vocab,\n normalization=\"sents\",\n label_smoothing=0.0,\n use_kl_annealing=False,\n use_kl_freebits=False,\n kl_freebits_margin=0.0,\n kl_annealing_current=0.0,\n kl_annealing_increment=0.0001,\n kl_annealing_warmup_steps=1000,\n image_loss_type='logprob',\n use_local_image_features=False,\n two_step_image_prediction=False\n ):\n self.multimodal_model_type = 'vi-model1'\n\n super(NMTVIModel1LossCompute, self).__init__(generator, tgt_vocab,\n normalization, label_smoothing)\n\n # 
kl annealing parameters\n self.n_model_updates = 0\n self.use_kl_annealing = use_kl_annealing\n if use_kl_annealing:\n self.kl_annealing_current = kl_annealing_current\n self.kl_annealing_increment = kl_annealing_increment\n self.kl_annealing_warmup_steps = kl_annealing_warmup_steps\n else:\n self.kl_annealing_current = 1.0\n self.kl_annealing_increment = 0.0\n self.kl_annealing_warmup_steps = 0\n\n self.use_kl_freebits = use_kl_freebits\n if use_kl_freebits:\n self.kl_freebits_margin = kl_freebits_margin\n else:\n self.kl_freebits_margin = 0.0\n\n self.image_loss_type = image_loss_type\n self.use_local_image_features = use_local_image_features\n self.two_step_image_prediction = two_step_image_prediction\n self._statistics = onmt.VIStatistics\n\n if image_loss_type == 'categorical':\n self.image_loss_criterion = nn.NLLLoss2d()", "def fit_model(self):\r\n\t\tself.mu = np.mean(self.x, axis = 0)\r\n\t\tself.sig = np.std(self.x, axis = 0)", "def generate(self, text, prev_mel):\n # forward pass through text embedding and get k and v\n kv = self.t_encoder(text)\n k = kv[:,:self.hp.d,:]\n v = kv[:,self.hp.d:,:]\n # forward pass through audio encoding and get Q\n q = self.a_encoder(prev_mel)\n \n # compute attention and use forcible incremental attention (section 4.2)\n a = (k.transpose(2,1)).matmul(q)/np.sqrt(self.hp.d)\n a = F.softmax(a, dim=1)\n \"\"\"\n # get argmax\n argmax = a[0].argmax(dim=0) # argmax on the N dimension\n # forcibly incremental attention\n preva = -1\n for i in range(a.shape[-1]):\n if argmax[i] < preva -1 or preva + 3 < argmax[i]:\n # force the ith column to be zero\n a[:,:,i] = 0\n # find correct position\n position = min(a.shape[1]-1, preva + 1)\n a[:,position,i] = 1.0\n # update preva\n preva = a[0,:,i].argmax()\"\"\"\n\n # finish computing y and a\n r = r = v.matmul(a)\n\n rprime = torch.cat((r, q), dim=1)\n ylogit = self.decoder(rprime)\n y = F.sigmoid(ylogit)\n return y, ylogit, a", "def get_attention(self, X):\n if self.bn:\n layer = 16\n else:\n layer = 14\n inputs = [K.learning_phase()] + [self.model.inputs[0]]\n _attention_f = K.function(inputs, [\n self.model.layers[layer].output])\n \n return _attention_f([0] + [X])", "def explain(self):\n # build the 2 versions of the model\n model = self.build_model()\n last_conv_model = self.build_cut_model()\n\n for i, label_name in enumerate(self.label_names):\n # This is the algorithm for the last convolution layer's tensor image\n # Get the index of the image that was classified correctly with the most confidence for the class\n predicted_col_proba = np.array(self.predicted_labels)[0][:, i]\n predicted_col_argsort = predicted_col_proba.argsort()[::-1]\n predicted_col = (predicted_col_proba > 0.2).astype(int)\n true_col = self.true_labels[:, 0]\n\n representative_image_index = None\n for most_probable_arg_index in predicted_col_argsort:\n if predicted_col[most_probable_arg_index] == true_col[most_probable_arg_index]:\n representative_image_index = most_probable_arg_index\n break\n\n # Resize the image to fit the neural network and keep the original resized image\n original_img = io.imread('{}/{}/{}'.format(path_to_img_directory, self.ex_format, np.array(self.image_names)[representative_image_index]))\n original_img = cv2.normalize(original_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n original_img = cv2.resize(original_img, dsize=(self.ex_input_size, self.ex_input_size), interpolation=cv2.INTER_CUBIC)\n img = np.expand_dims(original_img, axis=0)\n original_img = original_img[:, :, :3]\n\n # 
Get the output of the neural network for this image as a tensor\n model.predict(np.array(img))\n class_output = model.output[:, i]\n last_conv_layer = model.get_layer(self.ex_last_conv_layer_name1).output\n # if self.model_name == 'vit':\n # last_conv_layer = tf.nn.relu(tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024)))\n\n # Get the output for the cut model\n cut_img = last_conv_model.predict(np.array(img))[0]\n if self.model_name == 'vit':\n cut_img = np.reshape(cut_img[:256, :], (16, 16, 1024))\n cut_img = np.mean(cut_img, axis=-1)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if self.model_name == 'vit':\n cut_img[0, 0] = np.mean(cut_img)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n cut_img = cv2.resize(cut_img, (self.ex_input_size, self.ex_input_size))\n\n # This is the algorithm of the Grad-CAM model\n # Refine the output of the last convolutional layer according to the class output\n grads = K.gradients(class_output, last_conv_layer)[0]\n if self.model_name == 'vit':\n last_conv_layer = tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024))\n last_conv_layer = last_conv_layer / tf.norm(last_conv_layer)\n\n grads = tf.reshape(grads[:, :256, :], (-1, 16, 16, 1024))\n grads = grads / tf.norm(grads)\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n iterate = K.function([model.input], [pooled_grads, last_conv_layer[0]])\n pooled_grads_value, conv_layer_output_value = iterate([img])\n for j in range(self.ex_last_conv_layer_filter_number):\n conv_layer_output_value[:, :, j] *= pooled_grads_value[j]\n\n # Create a 16x16 heatmap and scale it to the same size as the original image\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (self.ex_input_size, self.ex_input_size))\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = cv2.normalize(heatmap, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n superimposed_img = cv2.addWeighted(original_img, 0.7, heatmap, 0.4, 0)\n\n # save the original image\n plt.matshow(original_img)\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'original', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the cut image\n plt.matshow(cut_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'cut', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the superimposed gradcam image\n plt.matshow(superimposed_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'gradcam', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)", "def forward(self, hg, samp_bias1=None, samp_bias2=None):\n h_1_all = [];h_2_all = [];c_all = [];logits = []\n result = {}\n # process features\n features = hg.srcdata['h']\n feats = self.normal_feat(features, self.meta_paths)\n # shuffled features\n shuf_feats = self.shuf_feats(feats)\n\n for idx, meta_path in enumerate(self.meta_paths):\n new_g = 
dgl.metapath_reachable_graph(hg, meta_path)\n for i in range(self.sc):\n new_g = dgl.add_self_loop(new_g)\n\n feats[idx] = F.dropout(feats[idx], self.dropout, training=self.training)\n shuf_feats[idx] = F.dropout(shuf_feats[idx], self.dropout, training=self.training)\n\n h_1 = self.gcn[idx](new_g, feats[idx])\n c = self.readout(h_1)\n c = self.readout_act_func(c)\n h_2 = self.gcn[idx](new_g, shuf_feats[idx])\n\n\n logit = self.disc(c, h_1, h_2, samp_bias1, samp_bias2)\n h_1_all.append(h_1.unsqueeze(0))\n h_2_all.append(h_2.unsqueeze(0))\n c_all.append(c)\n logits.append(logit)\n result['logits'] = logits\n\n # Attention or not\n if self.isAttn:\n r\"\"\"\n .. math::\n \\begin{equation}\n \\mathbf{h}_{i}=\\mathcal{Q}\\left(\\left\\{\\mathbf{h}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\sum_{r \\in \\mathcal{R}} a_{i}^{(r)} \\mathbf{h}^{(r)}\n \\end{equation}\n\n where :math:`a_{i}^{(r)}` denotes the importance of relationr in generating the final embedding of node videfined as:\n\n .. math::\n \\begin{equation}\n a_{i}^{(r)}=\\frac{\\exp \\left(\\mathbf{q}^{(r)} \\cdot \\mathbf{h}_{i}^{(r)}\\right)}{\\sum_{r^{\\prime} \\in \\mathcal{R}} \\exp \\left(\\mathbf{q}^{\\left(r^{\\prime}\\right)} \\cdot \\mathbf{h}_{i}^{r^{\\prime}}\\right)}\n \\end{equation}\n \"\"\"\n\n h_1_all_lst = [];h_2_all_lst = [];c_all_lst = []\n for h_idx in range(self.nheads):\n h_1_all_, h_2_all_, c_all_ = self.attn[h_idx](h_1_all, h_2_all, c_all)\n h_1_all_lst.append(h_1_all_);h_2_all_lst.append(h_2_all_); c_all_lst.append(c_all_)\n\n h_1_all = torch.mean(torch.cat(h_1_all_lst, 0), 0).unsqueeze(0)\n h_2_all = torch.mean(torch.cat(h_2_all_lst, 0), 0).unsqueeze(0)\n\n else:\n h_1_all = torch.mean(torch.cat(h_1_all, 0), 0).unsqueeze(0)\n h_2_all = torch.mean(torch.cat(h_2_all, 0), 0).unsqueeze(0)\n\n # Lcs = [Z − AVG { H(r)|r∈ R }]^2 - [Z − AVG { ~H(r)|r∈ R }]^2\n pos_reg_loss = ((self.H - h_1_all) ** 2).sum()\n neg_reg_loss = ((self.H - h_2_all) ** 2).sum()\n reg_loss = pos_reg_loss - neg_reg_loss\n result['reg_loss'] = reg_loss\n\n # semi-supervised module\n if self.isSemi:\n r\"\"\"\n Extension to Semi-Supervised Learning\n\n .. math::\n \\begin{equation}\n \\ell_{\\text {sup }}=-\\frac{1}{\\left|\\mathcal{Y}_{L}\\right|} \\sum_{l \\in \\mathcal{Y}_{L}} \\sum_{i=1}^{c} Y_{l i} \\ln \\hat{Y}_{l i}\n \\end{equation}\n\n Where :math:`mathcal{Y}_{L}` is the set of node indices with labels\n \"\"\"\n semi = self.logistic(self.H).squeeze(0)\n result['semi'] = semi\n\n # result: ['logits','reg_loss','semi']\n return result", "def transform(self, src):\n T, feature_dim = src.shape[0], self.Y_static_dim*3\n\n if feature_dim == self.Y_static_dim:\n return super(GMM_M, self).transform(src)\n\n # A suboptimum mixture sequence (eq.37)\n optimum_mix = self.px.predict(src)\n\n # Compute E eq.(40)\n E = np.empty((T, feature_dim))\n for t in range(T):\n m = optimum_mix[t] # estimated mixture index at time t\n xx = np.linalg.solve(self.covarXX[m], src[t] - self.src_means[m])\n #print(xx.shape,self.tgt_means[m].shape,self.covarYX[m].shape)\n # Eq. (22)\n E[t] = self.tgt_means[m] +np.dot(self.covarYX[m], xx)\n\n # Compute D eq.(23)\n # Approximated variances with diagonals so that we can do MLPG\n # efficiently in dimention-wise manner\n #print(E.shape)\n D = np.empty((T, feature_dim))\n #print(D.shape)\n for t in range(T):\n m = optimum_mix[t]\n # Eq. 
(23), with approximating covariances as diagonals\n #D[t] = np.diag(self.covarYY[m]) - np.diag(self.covarYX[m]) / \\\n # np.diag(self.covarXX[m]) * np.diag(self.covarXY[m])\n\n # Exact Inference\n dd = self.covarYY[m] - np.linalg.multi_dot([self.covarYX[m], np.linalg.pinv(self.covarXX[m]), self.covarXY[m]])\n #print(dd.shape)\n D[t] = np.diag(dd)\n\n # Once we have mean and variance over frames, then we can do MLPG\n return E, D, self.windows#mlpg(E, D, self.windows)", "def __init__(self, inp, oup, kernel_size=3, stride=1, expand_ratio = 6, transform_mode='conv2d'):\n super(SpatialSelfAttention, self).__init__()\n assert stride in [1, 2]\n\n hidden_dim = round(inp * expand_ratio)\n self.expand_ratio = expand_ratio\n self.identity = stride == 1 and inp == oup\n self.inp, self.oup = inp, oup\n self.high_dim_id = False\n\n if self.expand_ratio != 1:\n self.conv_exp = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)\n self.bn1 = nn.BatchNorm2d(hidden_dim)\n if transform_mode == 'none':\n self.depth_sep_conv = nn.Conv2d(hidden_dim, hidden_dim, (kernel_size,kernel_size), (stride,stride), (1,1), groups=hidden_dim, bias=False)\n elif transform_mode == 'conv3d':\n self.row_conv = nn.Sequential(\n SpatialConv3d(hidden_dim, hidden_dim, kernel_size, reshape_dim=0, groups=hidden_dim),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True)\n )\n elif transform_mode == 'conv2d':\n self.row_conv = nn.Sequential(\n SpatialConv2d(hidden_dim, hidden_dim, kernel_size, reshape_dim=0, groups=hidden_dim),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True)\n )\n elif transform_mode == 'conv2+1d':\n self.row_conv = nn.Sequential(\n SpatialConv2d(hidden_dim, hidden_dim, kernel_size, reshape_dim=0, groups=hidden_dim),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n SpatialConv2d(hidden_dim, hidden_dim, kernel_size=(kernel_size[0], 1), reshape_dim=1, groups=hidden_dim),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True)\n )\n # self.bn2 = nn.BatchNorm2d(hidden_dim)\n\n self.conv_pro = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)\n self.bn3 = nn.BatchNorm2d(oup)\n\n self.relu = nn.ReLU6(inplace=True)", "def __init__(self, dim_input_hidden, n_class, use_cuda=False):\n super(RateDistAttentionGMM, self).__init__()\n self.dim_input_hidden = dim_input_hidden\n self.use_cuda = use_cuda\n self.n_class = n_class\n self.ffn1 = embed2Dist(dim_input_hidden, self.n_class)\n # self.ffn1 = nn.Linear(dim_input_hidden, self.n_class)\n # self.ffn2 = embed2Dist(self.n_class * 2, self.n_class)\n # self.W_H_D = Parameter(torch.FloatTensor(dim_input_hidden, self.n_class))\n self.reset_parameters()", "def __init__(self, args):\n super(DSRMM, self).__init__()\n\n self.wv=args.wv\n self.index_to_word=args.index_to_word\n\n self.input_dim=args.emsize\n self.device=args.device\n\n self.STR=args.STR\n\n self.nbins = args.nbins\n #self.bins = [-1.0, -0.5, 0, 0.5, 1.0, 1.0]\n self.bins = [-0.75, -0.25, 0.25, 0.75, 1.0, 1.0]\n\n self.gating_network = GatingNetwork(args.emsize)\n\n\n self.conv1 = nn.Conv2d(self.input_dim, args.k1, (3, 3), padding=1)\n self.conv2 = nn.Conv2d(self.input_dim, args.k2, (3, 5), padding=(1, 2))\n self.conv3 = nn.Conv2d(self.input_dim, args.k3, (3, 7), padding=(1, 3))\n self.relu = nn.ReLU()\n self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1)\n self.conv_all = nn.Conv2d(args.k1+args.k2+args.k3, args.k4, (3, 3), padding=1)\n self.conv_dim = nn.Conv2d(args.k4, args.sem_feature, (1, 1))\n\n self.conv_uni = nn.Sequential(\n nn.Conv2d(1, args.emsize, (1, self.input_dim)),\n nn.ReLU()\n 
)\n\n tensor_mu = torch.FloatTensor(args.mu).to(self.device)\n tensor_sigma = torch.FloatTensor(args.sigma).to(self.device)\n\n self.mu = Variable(tensor_mu, requires_grad=False).view(1, 1, 1, self.nbins)\n self.sigma = Variable(tensor_sigma, requires_grad=False).view(1, 1, 1, self.nbins)\n\n if args.STR:\n self.output3 = nn.Linear(args.sem_feature+args.nbins*args.max_query_len+39, 1,True)\n else:\n self.output3 = nn.Linear(args.sem_feature+args.nbins*args.max_query_len, 1,True)", "def recognition_model(self, x, c):\n x = self.encoder(x)\n c = self.conditional(c)\n temp = self.recog_head(x + c)\n mu = self.mu(temp)\n logvar = self.logvar(temp)\n return mu, logvar", "def recognition_model(self, x, c):\n x = self.encoder(x)\n c = self.conditional(c)\n temp = self.recog_head(x + c)\n mu = self.mu(temp)\n logvar = self.logvar(temp)\n return mu, logvar", "def demo(image, model_class, do_add_noise=True):\n Log.enable_output = True\n Log.set_log_max_depth(8)\n\n image = normalise(image)\n image = numpy.expand_dims(image, axis=0)\n image = numpy.expand_dims(image, axis=0)\n noisy = add_noise(image) if do_add_noise else image\n print(noisy.shape)\n\n # noisy = models.tensor(noisy)\n image = torch.tensor(image)\n\n model = model_class(\n nb_unet_levels=2,\n spacetime_ndim=2,\n )\n\n print(\"training starts\")\n\n start = time.time()\n n2t_train(noisy, model, nb_epochs=128)\n stop = time.time()\n print(f\"Training: elapsed time: {stop - start} \")\n\n noisy = torch.tensor(noisy)\n model.eval()\n model = model.cpu()\n print(f\"noisy tensor shape: {noisy.shape}\")\n # in case of batching we have to do this:\n start = time.time()\n denoised = model(noisy)\n stop = time.time()\n print(f\"inference: elapsed time: {stop - start} \")\n\n noisy = noisy.detach().numpy()[0, 0, :, :]\n image = image.detach().numpy()[0, 0, :, :]\n denoised = denoised.detach().numpy()[0, 0, :, :]\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n denoised = numpy.clip(denoised, 0, 1)\n\n return calculate_print_psnr_ssim(image, noisy, denoised)\n\n # import napari\n #\n # viewer = napari.Viewer() # no prior setup needed\n # viewer.add_image(image, name='image')\n # viewer.add_image(noisy, name='noisy')\n # viewer.add_image(denoised, name='denoised')\n # napari.run()", "def __init__(self, img_size, latent_dim=10):\n super(EncoderBurgess, self).__init__()\n\n # Layer parameters\n hid_channels = 32\n kernel_size = 4\n hidden_dim = 256\n self.latent_dim = latent_dim\n self.img_size = img_size\n # Shape required to start transpose convs\n self.reshape = (hid_channels, kernel_size, kernel_size)\n n_chan = self.img_size[0]\n\n # Convolutional layers\n cnn_kwargs = dict(stride=2, padding=1)\n self.conv1 = nn.Conv2d(n_chan, hid_channels, kernel_size, **cnn_kwargs)\n self.conv2 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n self.conv3 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n\n # If input image is 64x64 do fourth convolution\n if self.img_size[1] == self.img_size[2] == 64:\n self.conv_64 = nn.Conv2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs\n )\n\n # Fully connected layers\n self.lin1 = nn.Linear(np.product(self.reshape), hidden_dim)\n self.lin2 = nn.Linear(hidden_dim, hidden_dim)\n\n # Fully connected layers for mean and variance\n self.mu_logvar_gen = nn.Linear(hidden_dim, self.latent_dim * 2)", "def self_attention(self, hidden):\n mul1 = self.attention1(hidden)\n mul2 = self.attention2(mul1)\n return self.softmax(mul2)", "def __init__(self):\n # def 
__init__(self, embed_size, input_channels):\n super(EncoderCNN, self).__init__()\n self.off_model = OffsetCNN()\n self.sig_model = SignificanceCNN()\n self.sigmoid = nn.Sigmoid()\n self.W = nn.Conv3d(1, 1, (5, 1, 1))", "def __init__(\n self,\n in_channels=128,\n aux_channels=128,\n channels=64,\n out_channels=1,\n num_embs=100,\n num_spk_embs=128,\n spk_emb_dim=128,\n concat_spk_emb=False,\n kernel_size=9,\n dilation=2,\n bias=True,\n noise_upsample_scales=[11, 2, 2, 2],\n noise_upsample_activation=\"LeakyReLU\",\n noise_upsample_activation_params={\"negative_slope\": 0.2},\n upsample_scales=[2, 2, 2, 2, 2, 2, 2, 2, 1],\n upsample_mode=\"nearest\",\n gated_function=\"softmax\",\n use_weight_norm=True,\n ):\n super().__init__()\n\n self.in_channels = in_channels\n\n # define id embedding\n self.emb = torch.nn.Embedding(\n num_embeddings=num_embs, embedding_dim=aux_channels\n )\n self.spk_emb = torch.nn.Embedding(\n num_embeddings=num_spk_embs, embedding_dim=spk_emb_dim\n )\n self.concat_spk_emb = concat_spk_emb\n if not concat_spk_emb:\n assert aux_channels == spk_emb_dim\n else:\n aux_channels = aux_channels + spk_emb_dim\n\n noise_upsample = []\n in_chs = in_channels\n for noise_upsample_scale in noise_upsample_scales:\n # NOTE(kan-bayashi): How should we design noise upsampling part?\n noise_upsample += [\n torch.nn.ConvTranspose1d(\n in_chs,\n channels,\n noise_upsample_scale * 2,\n stride=noise_upsample_scale,\n padding=noise_upsample_scale // 2 + noise_upsample_scale % 2,\n output_padding=noise_upsample_scale % 2,\n bias=bias,\n )\n ]\n noise_upsample += [\n getattr(torch.nn, noise_upsample_activation)(\n **noise_upsample_activation_params\n )\n ]\n in_chs = channels\n self.noise_upsample = torch.nn.Sequential(*noise_upsample)\n self.noise_upsample_factor = np.prod(noise_upsample_scales)\n\n self.blocks = torch.nn.ModuleList()\n aux_chs = aux_channels\n for upsample_scale in upsample_scales:\n self.blocks += [\n TADEResBlock(\n in_channels=channels,\n aux_channels=aux_chs,\n kernel_size=kernel_size,\n dilation=dilation,\n bias=bias,\n upsample_factor=upsample_scale,\n upsample_mode=upsample_mode,\n gated_function=gated_function,\n ),\n ]\n aux_chs = channels\n self.upsample_factor = np.prod(upsample_scales)\n\n self.output_conv = torch.nn.Sequential(\n torch.nn.Conv1d(\n channels,\n out_channels,\n kernel_size,\n 1,\n bias=bias,\n padding=(kernel_size - 1) // 2,\n ),\n torch.nn.Tanh(),\n )\n\n # apply weight norm\n if use_weight_norm:\n self.apply_weight_norm()\n\n # reset parameters\n self.reset_parameters()", "def __init__(self, idim, odim_tgt, odim_src, args, ignore_id=-1):\n torch.nn.Module.__init__(self)\n if args.transformer_attn_dropout_rate is None:\n args.transformer_attn_dropout_rate = args.dropout_rate\n\n # special tokens and model dimensions\n self.pad = 0\n self.sos_tgt = odim_tgt - 1\n self.eos_tgt = odim_tgt - 1\n self.sos_src = odim_src - 1\n self.eos_src = odim_src - 1\n self.odim_tgt = odim_tgt\n self.odim_src = odim_src\n self.idim = idim\n self.adim = args.adim\n self.ignore_id = ignore_id\n\n # submodule\n self.mtlalpha = getattr(args, \"mtlalpha\", 0.0)\n self.asr_weight = getattr(args, \"asr_weight\", 0.0)\n self.mt_weight = getattr(args, \"mt_weight\", 0.0)\n self.num_decoders = getattr(args, \"num_decoders\", 2)\n self.do_st = getattr(args, \"do_st\", True)\n self.do_mt = getattr(args, \"do_mt\", self.mt_weight > 0.0)\n self.do_asr = self.asr_weight > 0 and self.mtlalpha < 1\n\n # cross-attention parameters\n self.cross_weight = getattr(args, 
\"cross_weight\", 0.0)\n self.cross_self = getattr(args, \"cross_self\", False)\n self.cross_src = getattr(args, \"cross_src\", False)\n self.cross_operator = getattr(args, \"cross_operator\", None)\n self.cross_to_asr = getattr(args, \"cross_to_asr\", False)\n self.cross_to_st = getattr(args, \"cross_to_st\", False)\n self.wait_k_asr = getattr(args, \"wait_k_asr\", 0)\n self.wait_k_st = getattr(args, \"wait_k_st\", 0)\n self.cross_src_from = getattr(args, \"cross_src_from\", \"embedding\")\n self.cross_self_from = getattr(args, \"cross_self_from\", \"embedding\")\n self.cross_shared = getattr(args, \"cross_shared\", False)\n self.cross_weight_learnable = getattr(args, \"cross_weight_learnable\", False)\n\n # one-to-many models parameters\n self.use_joint_dict = getattr(args, \"use_joint_dict\", True)\n self.one_to_many = getattr(args, \"one_to_many\", False)\n self.use_lid = getattr(args, \"use_lid\", False)\n if self.use_joint_dict:\n self.langs_dict = getattr(args, \"langs_dict_tgt\", None)\n self.lang_tok = getattr(args, \"lang_tok\", None)\n self.lang_tok_mt = getattr(args, \"lang_tok_mt\", None)\n\n self.subsample = get_subsample(args, \n mode='mt' if self.do_mt else 'st', \n arch='transformer')\n self.reporter = MTReporter() if self.do_mt else Reporter() \n self.normalize_before = getattr(args, \"normalize_before\", True)\n\n # Backward compatability\n if self.cross_operator in [\"sum\", \"concat\"]:\n if self.cross_self and self.cross_src:\n self.cross_operator = \"self_src\" + self.cross_operator\n elif self.cross_self:\n self.cross_operator = \"self_\" + self.cross_operator\n elif self.cross_src:\n self.cross_operator = \"src_\" + self.cross_operator\n if self.cross_operator:\n assert self.cross_operator in ['self_sum', 'self_concat', 'src_sum', \n 'src_concat', 'self_src_sum', 'self_src_concat']\n\n # Check parameters\n if self.one_to_many:\n self.use_lid = True\n if not self.do_st:\n assert (not self.cross_to_asr) and (not self.cross_to_st)\n if self.cross_operator and 'sum' in self.cross_operator and self.cross_weight <= 0:\n assert (not self.cross_to_asr) and (not self.cross_to_st)\n if self.cross_to_asr or self.cross_to_st:\n assert self.do_st and self.do_asr\n assert self.cross_self or self.cross_src\n assert bool(self.cross_operator) == (self.do_asr and (self.cross_to_asr or self.cross_to_st))\n if self.cross_src_from != \"embedding\" or self.cross_self_from != \"embedding\":\n assert self.normalize_before\n if self.wait_k_asr > 0:\n assert self.wait_k_st == 0\n elif self.wait_k_st > 0:\n assert self.wait_k_asr == 0\n else:\n assert self.wait_k_asr == 0\n assert self.wait_k_st == 0\n\n logging.info(\"*** Cross attention parameters ***\")\n if self.cross_to_asr:\n logging.info(\"| Cross to ASR\")\n if self.cross_to_st:\n logging.info(\"| Cross to ST\")\n if self.cross_self:\n logging.info(\"| Cross at Self\")\n if self.cross_src:\n logging.info(\"| Cross at Source\")\n if self.cross_to_asr or self.cross_to_st:\n logging.info(f'| Cross operator: {self.cross_operator}')\n logging.info(f'| Cross sum weight: {self.cross_weight}')\n if self.cross_src:\n logging.info(f'| Cross source from: {self.cross_src_from}')\n if self.cross_self:\n logging.info(f'| Cross self from: {self.cross_self_from}')\n logging.info(f\"Use joint dictionary: {self.use_joint_dict}\")\n \n if (self.cross_src_from != \"embedding\" and self.cross_src) \\\n and (not self.normalize_before):\n logging.warning(f'WARNING: Resort to using \\\n self.cross_src_from == embedding for cross at source attention.')\n if 
(self.cross_self_from != \"embedding\" and self.cross_self) \\\n and (not self.normalize_before):\n logging.warning(f'WARNING: Resort to using \\\n self.cross_self_from == embedding for cross at self attention.')\n\n # Adapters\n self.use_adapters = getattr(args, \"use_adapters\", False)\n self.use_adapters_in_enc = getattr(args, \"use_adapters_in_enc\", False)\n adapter_names = getattr(args, \"adapters\", None)\n adapter_reduction_factor = getattr(args, \"adapter_reduction_factor\", None)\n adapter_reduction_factor_enc = getattr(args, \"adapter_reduction_factor_enc\", adapter_reduction_factor)\n use_adapters_for_asr = getattr(args, \"use_adapters_for_asr\", True)\n adapter_before_src_attn = getattr(args, \"adapter_before_src_attn\", False)\n adapter_after_mha = getattr(args, \"adapter_after_mha\", False)\n use_shared_adapters = getattr(args, \"use_shared_adapters\", False)\n use_shared_adapters_enc = getattr(args, \"use_shared_adapters_enc\", False)\n # if self.use_adapters and not use_adapters_for_asr:\n # assert not self.do_asr or \\\n # (self.do_asr and self.num_decoders != 1) or \\\n # (self.do_asr and not self.do_st) # for backward compatibility\n\n if adapter_names:\n if self.do_asr and not self.do_st:\n adapter_names = [str(args.char_list_src.index(f'<2{l}>')) for l in adapter_names]\n else:\n adapter_names = [str(args.char_list_tgt.index(f'<2{l}>')) for l in adapter_names]\n logging.info(f'| adapters = {adapter_names}')\n\n if self.do_st or self.do_asr:\n logging.info(f'Speech encoder')\n self.encoder = Encoder(\n idim=idim,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.eunits,\n num_blocks=args.elayers,\n input_layer=getattr(args, \"transformer_input_layer\", \"conv2d\"),\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n attention_dropout_rate=args.transformer_attn_dropout_rate,\n adapter_names=adapter_names if self.use_adapters_in_enc else None,\n reduction_factor=adapter_reduction_factor_enc,\n adapter_after_mha=adapter_after_mha,\n shared_adapters=use_shared_adapters_enc,\n )\n if self.do_st:\n logging.info('ST decoder')\n self.decoder = Decoder(\n odim=odim_tgt,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n self_attention_dropout_rate=args.transformer_attn_dropout_rate,\n src_attention_dropout_rate=args.transformer_attn_dropout_rate,\n normalize_before=self.normalize_before,\n cross_operator=self.cross_operator if self.cross_to_st else None,\n cross_shared=self.cross_shared,\n cross_weight_learnable=self.cross_weight_learnable,\n cross_weight=self.cross_weight,\n use_output_layer=True if (self.use_joint_dict or \\\n (self.do_st and not self.do_asr)) else False,\n adapter_names=adapter_names,\n reduction_factor=adapter_reduction_factor,\n adapter_before_src_attn=adapter_before_src_attn,\n adapter_after_mha=adapter_after_mha,\n shared_adapters=use_shared_adapters,\n )\n if self.do_asr:\n logging.info('ASR decoder')\n self.decoder_asr = Decoder(\n odim=odim_src,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n self_attention_dropout_rate=args.transformer_attn_dropout_rate,\n src_attention_dropout_rate=args.transformer_attn_dropout_rate,\n normalize_before=self.normalize_before,\n cross_operator=self.cross_operator if 
self.cross_to_asr else None,\n cross_shared=self.cross_shared,\n cross_weight_learnable=self.cross_weight_learnable,\n cross_weight=self.cross_weight,\n use_output_layer=True if (self.use_joint_dict or \\\n (self.do_asr and not self.do_st)) else False,\n adapter_names=adapter_names,\n reduction_factor=adapter_reduction_factor,\n adapter_before_src_attn=adapter_before_src_attn,\n adapter_after_mha=adapter_after_mha,\n shared_adapters=use_shared_adapters,\n )\n if self.num_decoders == 1 and self.do_st:\n logging.info('*** Use shared decoders *** ')\n self.decoder_asr = self.decoder\n\n if not self.use_joint_dict and (self.do_st and self.do_asr):\n self.output_layer = torch.nn.Linear(args.adim, odim_tgt)\n self.output_layer_asr = torch.nn.Linear(args.adim, odim_src)\n\n # submodule for MT task\n if self.do_mt:\n logging.info('MT encoder')\n self.encoder_mt = Encoder(\n idim=odim_src,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n input_layer='embed',\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n attention_dropout_rate=args.transformer_attn_dropout_rate,\n padding_idx=0\n )\n if not self.do_st:\n logging.info('MT decoder')\n self.decoder_mt = Decoder(\n odim=odim_tgt,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n self_attention_dropout_rate=args.transformer_attn_dropout_rate,\n src_attention_dropout_rate=args.transformer_attn_dropout_rate,\n normalize_before=self.normalize_before,\n use_output_layer=True,\n )\n self.reset_parameters(args) # place after the submodule initialization\n if self.mtlalpha > 0.0:\n self.ctc = CTC(odim_src, args.adim, args.dropout_rate, \n ctc_type=args.ctc_type, reduce=True,\n zero_infinity=True)\n else:\n self.ctc = None\n\n if self.asr_weight > 0 and (args.report_cer or args.report_wer):\n from espnet.nets.e2e_asr_common import ErrorCalculator\n self.error_calculator = ErrorCalculator(args.char_list_src,\n args.sym_space, args.sym_blank,\n args.report_cer, args.report_wer)\n elif self.do_mt and getattr(args, \"report_bleu\", False):\n from espnet.nets.e2e_mt_common import ErrorCalculator\n self.error_calculator = ErrorCalculator(args.char_list_tgt,\n args.sym_space,\n args.report_bleu)\n else:\n self.error_calculator = None\n self.rnnlm = None\n\n # criterion\n if self.do_st:\n self.criterion_st = LabelSmoothingLoss(self.odim_tgt, self.ignore_id, args.lsm_weight,\n args.transformer_length_normalized_loss)\n if self.do_asr:\n self.criterion_asr = LabelSmoothingLoss(self.odim_src, self.ignore_id, args.lsm_weight,\n args.transformer_length_normalized_loss)\n if self.do_mt:\n self.criterion_mt = LabelSmoothingLoss(self.odim_tgt, self.ignore_id, args.lsm_weight,\n args.transformer_length_normalized_loss)\n self.normalize_length = args.transformer_length_normalized_loss # for PPL\n\n # Language embedding layer\n if self.lang_tok == \"encoder-pre-sum\":\n self.language_embeddings = build_embedding(self.langs_dict, self.idim, \n padding_idx=self.pad)\n logging.info(f'language_embeddings: {self.language_embeddings}')\n\n # Backward compatability\n if self.cross_operator:\n if \"sum\" in self.cross_operator:\n self.cross_operator = \"sum\"\n if \"concat\" in self.cross_operator: \n self.cross_operator = \"concat\"", "def model_processing(img):\n\n # assert isinstance(img, EmotionalImage)\n\n if str(img.name).find('json') > -1:\n 
return\n user = get_user(img.path + '/' + 'meta.json')\n filePath = img.path + '/' + img.name\n # print(\"---------------Processsing----------------\", img.name)\n\n features = extract_features(filePath)\n emotions = predict_emotions(features)\n uuid1 = uuid.uuid4()\n emImage = EmotionalImage(\n uuid1, img.name, img.path, features, emotions, \"\", \"\", \"\")\n user.images.append(emImage)\n # user.save()", "def __init__(self, kernel_size=11, log_t=False):\n super(Attention, self).__init__()\n assert kernel_size % 2 == 1, \"Kernel size should be odd for 'same' conv.\"\n padding = (kernel_size - 1) // 2\n self.conv = nn.Conv1d(1, 1, kernel_size, padding=padding)\n self.log_t = log_t", "def get_attention_multiscale_batch(self, X,X_mask):\n layer = -3\n attmodel1 = Model(self.model.inputs,self.model.get_layer('att1').output[1])\n attmodel2 = Model(self.model.inputs,self.model.get_layer('att2').output[1])\n attmodel3 = Model(self.model.inputs,self.model.get_layer('att3').output[1])\n return attmodel1.predict([X,X_mask.reshape(-1,X_mask.shape[1],1)],batch_size=100),attmodel2.predict([X,X_mask.reshape(-1,X_mask.shape[1],1)],batch_size=100),attmodel3.predict([X,X_mask.reshape(-1,X_mask.shape[1],1)],batch_size=100)", "def __init__(\n self,\n d_model: int,\n nheads: int,\n ff_dim: int,\n bias: bool = False,\n dropout: float = 0.2,\n ):\n super().__init__()\n self.d_model = d_model\n self.nheads = nheads\n self.ff_dim = ff_dim\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.self_attention = MultiHeadAttention(d_model, nheads, bias, dropout)\n self.feedforward = PositionwiseFeedforward(d_model, ff_dim)", "def mult_reads_gmm(reads, training_reads, components):\n\n\tprediction_zero_100 = 0\n\tprediction_one_100 = 0\n\tprediction_zero_200 = 0\n\tprediction_one_200 = 0\n\n\tbase_opts = ['A', 'C', 'G', 'T']\n\n\n\tmodel = mixture.GMM(n_components=components, covariance_type='spherical')\n\tnum_reads = len(reads)\n\n\ttraining_reads = [read.get_read().replace('\\'', '') for read in training_reads]\n\n\tread_input = [read.get_read().replace('\\'', '') for read in reads]\n\t# alignment_inputs = []\n\t# alignment_inputs.extend(read.get_alignments())\n\n\t# Generates observations\n\t# bases are converted to their ascii character values\n\tread_list = []\n\tfor read in read_input:\n\t\tread_char = [convert_letter(c) for c in read]\n\t\tread_list.append(read_char)\n\n\tobservations = []\n\t\n\tfor alignment in training_reads:\n\t\talignment_list = [convert_letter(c) for c in alignment] \n\t\tobservations.append( alignment_list )\n\t# for base_index, base in enumerate(read_main):\n\t# \tbase_observations = [ord(base)]\n\t# \tfor alignment in alignments:\n\t# \t\tbase_observations.append(ord(alignment[base_index]))\n\n\t# \tobservations.append(base_observations)\n\n\tmodel.fit(observations)\n\tmeans = np.round(model.means_, 2)\n\tcovars = np.round(model.covars_, 2)\n\tconverted_means = []\n\tfor num_list in means:\n\t\t# convert to nearest acceptable letter\n\t\t#char_means = [chr(int(n)) for n in num_list]\n\t\tchar_means = [convert_to_letter(n) for n in num_list]\n\t\tconverted_means.append(char_means)\n\t\n\tpredictions = model.predict(read_list)\n\n\tread_predictions = []\n\tfor index, prediction in enumerate(predictions):\n\t\tmapping = [prediction, reads[index]]\n\t\tread_predictions.append(mapping)\n\t\n\n\tfor read_pr in read_predictions:\n\t\t\n\t\tprediction = read_pr[0]\n\t\t# def filt(x): return x[0] == prediction\n\t\t# matches = filter(filt, read_predictions)\n\t\tpr = 
prediction\n\t\trps = int(float(read_pr[1].get_position()))\n\t\t# print '\\n'\n\t\t# print prediction\n\t\t# print 'Converted Means: '\n\t\t# print ''.join(converted_means[prediction])\n\t\t# print 'Actual Read'\n\t\t# print read_pr[1].get_read()\n\t\t# print read_pr[1].get_position()\n\t\t# print 'Matches'\n\t\t# for m in matches:\n\t\t# \tprint m[1].get_read() + ' Position: ' + m[1].get_position()\n\t\t# \tm[1].print_read()\n\n\t\tif pr == 0:\n\t\t\tif rps == 100:\n\t\t\t\tprediction_zero_100 = prediction_zero_100 + 1\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprediction_zero_200 = prediction_zero_200 + 1\n\t\t\t\t\n\t\telse:\n\t\t\tif rps == 100:\n\t\t\t\tprediction_one_100 = prediction_one_100 + 1\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprediction_one_200 = prediction_one_200 + 1\n\t\t\t\t\n\n\tprint '\\n-------------Predictions---------------------'\n\tprint 'Prediction: 0 Position: 100 Num: ' + str(prediction_zero_100)\n\tprint 'Prediction: 1 Position: 100 Num: ' + str(prediction_one_100)\n\tprint 'Prediction: 0 Position: 200 Num: ' + str(prediction_zero_200)\n\tprint 'Prediction: 1 Position: 200 Num: ' + str(prediction_one_200)\n\n\tprint '\\n------Means: -----------'\n\tfor mean in converted_means:\n\t\tprint ''.join(mean) \n\n\t# for index, prediction in enumerate(predictions):\n\t# \tprint 'Read: '\n\t# \tprint reads[index].get_read()\n\t# \tprint 'Prediction: '\n\t# \tprint prediction\n\t# \tprint converted_means[prediction]\n\t# \tprint 'Means: '\n\t# \tprint means[prediction]\n\t# \tprint covars[prediction]\n\t# \tprint '----------------------------------------\\n'\n\n\n\t# posteriors = model.predict_proba(read_list)\n\t# print model.get_params(deep=True)\n\t# sample = model.sample()\n\t# print [convert_to_letter(n) for n in sample[0]]", "def test_gaussian_em():\n fname = \"gmm-3-10-0.7.npz\"\n gmm = GaussianMixtureModel.generate( fname, 3, 3 )\n k, d, M, S, w = gmm.k, gmm.d, gmm.means, gmm.sigmas, gmm.weights\n N, n = 1e6, 1e5\n\n\n X = gmm.sample( N, n )\n\n algo = GaussianMixtureEM(k, d)\n\n def report( i, O_, lhood ):\n M_, _, _ = O_\n lhood, Z, O_ = algo.run( X, None, report )\n\n M_, S_, w_ = O_\n\n M_ = closest_permuted_matrix( M, M_ )\n w_ = closest_permuted_vector( w, w_ )\n\n print w, w_\n\n print norm( M - M_ )/norm(M)\n print abs(S - S_).max()\n print norm( w - w_ ) \n\n assert( norm( M - M_ )/norm(M) < 1e-1 )\n assert (abs(S - S_) < 1 ).all()\n assert( norm( w - w_ ) < 1e-2 )", "def forward_test(self, img, img_metas, **kwargs):\n labels = {}\n labels['trans_inv'] = kwargs['trans_inv']\n labels['intrinsic_param'] = kwargs['intrinsic_param']\n labels['joint_root'] = kwargs['joint_root']\n labels['depth_factor'] = kwargs['depth_factor']\n labels['target_uvd_29'] = kwargs['target_uvd_29']\n labels['target_xyz_24'] = kwargs['target_xyz_24']\n labels['target_weight_24'] = kwargs['target_weight_24']\n labels['target_weight_29'] = kwargs['target_weight_29']\n labels['target_xyz_17'] = kwargs['target_xyz_17']\n labels['target_weight_17'] = kwargs['target_weight_17']\n labels['target_theta'] = kwargs['target_theta']\n labels['target_beta'] = kwargs['target_beta']\n labels['target_smpl_weight'] = kwargs['target_smpl_weight']\n labels['target_theta_weight'] = kwargs['target_theta_weight']\n labels['target_twist'] = kwargs['target_twist']\n labels['target_twist_weight'] = kwargs['target_twist_weight']\n\n bboxes = kwargs['bbox']\n\n for k, _ in labels.items():\n labels[k] = labels[k].cuda()\n\n trans_inv = labels.pop('trans_inv')\n intrinsic_param = labels.pop('intrinsic_param')\n joint_root = 
labels.pop('joint_root')\n depth_factor = labels.pop('depth_factor')\n if len(depth_factor.shape) != 2:\n depth_factor = torch.unsqueeze(depth_factor, dim=1)\n\n if self.backbone is not None:\n img = img.cuda().requires_grad_()\n features = self.backbone(img)\n features = features[0]\n else:\n features = img['features']\n\n if self.neck is not None:\n features = self.neck(features)\n\n output = self.head(features, trans_inv, intrinsic_param, joint_root,\n depth_factor, self.smpl)\n\n pred_uvd_jts = output['pred_uvd_jts']\n batch_num = pred_uvd_jts.shape[0]\n pred_xyz_jts_24 = output['pred_xyz_jts_24'].reshape(batch_num, -1,\n 3)[:, :24, :]\n pred_xyz_jts_24_struct = output['pred_xyz_jts_24_struct'].reshape(\n batch_num, 24, 3)\n pred_xyz_jts_17 = output['pred_xyz_jts_17'].reshape(batch_num, 17, 3)\n pred_mesh = output['pred_vertices'].reshape(batch_num, -1, 3)\n\n pred_xyz_jts_24 = pred_xyz_jts_24.cpu().data.numpy()\n pred_xyz_jts_24_struct = pred_xyz_jts_24_struct.cpu().data.numpy()\n pred_xyz_jts_17 = pred_xyz_jts_17.cpu().data.numpy()\n pred_uvd_jts = pred_uvd_jts.cpu().data\n pred_mesh = pred_mesh.cpu().data.numpy()\n pred_pose = output['pred_pose'].cpu().data.numpy()\n pred_beta = output['pred_shape'].cpu().data.numpy()\n\n assert pred_xyz_jts_17.ndim in [2, 3]\n pred_xyz_jts_17 = pred_xyz_jts_17.reshape(pred_xyz_jts_17.shape[0], 17,\n 3)\n pred_uvd_jts = pred_uvd_jts.reshape(pred_uvd_jts.shape[0], -1, 3)\n pred_xyz_jts_24 = pred_xyz_jts_24.reshape(pred_xyz_jts_24.shape[0], 24,\n 3)\n pred_scores = output['maxvals'].cpu().data[:, :29]\n\n hm_shape = [64, 64]\n pose_coords_list = []\n for i in range(pred_xyz_jts_17.shape[0]):\n bbox = bboxes[i].tolist()\n pose_coords, _ = heatmap2coord(\n pred_uvd_jts[i],\n pred_scores[i],\n hm_shape,\n bbox,\n mean_bbox_scale=None)\n pose_coords_list.append(pose_coords)\n\n all_preds = {}\n all_preds['vertices'] = pred_mesh\n all_preds['smpl_pose'] = pred_pose\n all_preds['smpl_beta'] = pred_beta\n all_preds['xyz_17'] = pred_xyz_jts_17\n all_preds['uvd_jts'] = pose_coords\n all_preds['xyz_24'] = pred_xyz_jts_24_struct\n image_path = []\n for img_meta in img_metas:\n image_path.append(img_meta['image_path'])\n all_preds['image_path'] = image_path\n all_preds['image_idx'] = kwargs['sample_idx']\n return all_preds", "def forward_train(self, img, img_metas, **kwargs):", "def self_attention_layer(hparams, prefix):\n return transformer_layers.SelfAttention(\n num_heads=hparams.get(prefix + \"num_heads\"),\n num_memory_heads=hparams.get(prefix + \"num_memory_heads\"),\n key_value_size=hparams.d_kv,\n shared_kv=hparams.get(prefix + \"shared_kv\", False),\n attention_kwargs=attention_kwargs_from_hparams(hparams))", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def __init__(self):\n self.model = GaussianNB();\n self.X = iris.data\n self.y = iris.target", "def createSignalModelExponential(data):\n print \"Creating model\"\n switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))\n \n noise_sigma = HalfNormal('noise_sigma', tau=sigToTau(.01))\n exp_sigma = HalfNormal('exp_sigma', tau=sigToTau(.05))\n \n #Modeling these parameters this way is why wf needs to be normalized\n exp_rate = Uniform('exp_rate', lower=0, upper=.1)\n exp_scale = Uniform('exp_scale', lower=0, upper=.1)\n \n 
timestamp = np.arange(0, len(data), dtype=np.float)\n \n @deterministic(plot=False, name=\"test\")\n def uncertainty_model(s=switchpoint, n=noise_sigma, e=exp_sigma):\n ''' Concatenate Poisson means '''\n out = np.empty(len(data))\n out[:s] = n\n out[s:] = e\n return out\n \n @deterministic\n def tau(eps=uncertainty_model):\n return np.power(eps, -2)\n \n## @deterministic(plot=False, name=\"test2\")\n## def adjusted_scale(s=switchpoint, s1=exp_scale):\n## out = np.empty(len(data))\n## out[:s] = s1\n## out[s:] = s1\n## return out\n#\n# scale_param = adjusted_scale(switchpoint, exp_scale)\n\n @deterministic(plot=False)\n def baseline_model(s=switchpoint, r=exp_rate, scale=exp_scale):\n out = np.zeros(len(data))\n out[s:] = scale * ( np.exp(r * (timestamp[s:] - s)) - 1.)\n \n# plt.figure(fig.number)\n# plt.clf()\n# plt.plot(out ,color=\"blue\" )\n# plt.plot(data ,color=\"red\" )\n# value = raw_input(' --> Press q to quit, any other key to continue\\n')\n\n return out\n\n baseline_observed = Normal(\"baseline_observed\", mu=baseline_model, tau=tau, value=data, observed= True )\n return locals()", "def extract_feat(self, img):\r\n _, _, x = self.pre_encoder(img)\r\n x = self.backbone(x)\r\n if self.with_neck:\r\n x = self.neck(x)\r\n return x", "def __init__(self, config: BertConfig):\r\n super().__init__(config)\r\n ### YOUR CODE HERE\r\n self.num_labels = config.num_labels # [0, 1] (start or end)\r\n self.bert = BertModel(config)\r\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # TODO: Not a separate FFN ? (For Start_FFN and End_FFN)\r\n\r\n ### END YOUR CODE\r\n\r\n # Don't forget initializing the weights\r\n self.init_weights()", "def __init__(self, **config):\n super(Classifier, self).__init__()\n self.input_dim_drug = config['hidden_dim_drug']\n self.input_dim_protein = config['hidden_dim_protein']\n self.hidden_dims = config['cls_hidden_dims']\n self.visual_attention=config['visual_attention']\n dims = [self.input_dim_drug + self.input_dim_protein] + self.hidden_dims + [2]\n if config['attention']:\n if config['concatenation']:\n dims[0]+=config['cnn_target_filters'][-1]\n else:\n dims[0]=self.input_dim_drug+config['cnn_target_filters'][-1]\n self.predictor = nn.ModuleList([nn.Linear(dims[i], dims[i + 1]) for i in range(len(self.hidden_dims)+1)])\n self.dropout = nn.Dropout(0.25)\n self._initialize()", "def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)", "def _build_attention(\n self, encoder_outputs, 
encoder_sequence_length, attention_bias\n ):\n with tf.variable_scope('AttentionMechanism'):\n attention_depth = self.params['attention_layer_size']\n if self.params['attention_type'] == 'location':\n attention_mechanism = LocationSensitiveAttention(\n num_units=attention_depth,\n memory=encoder_outputs,\n memory_sequence_length=encoder_sequence_length,\n probability_fn=tf.nn.softmax,\n dtype=tf.get_variable_scope().dtype,\n use_bias=attention_bias,\n )\n elif self.params['attention_type'] == 'bahdanau':\n bah_normalize = self.params.get('bahdanau_normalize', False)\n attention_mechanism = BahdanauAttention(\n num_units=attention_depth,\n memory=encoder_outputs,\n normalize=bah_normalize,\n memory_sequence_length=encoder_sequence_length,\n probability_fn=tf.nn.softmax,\n dtype=tf.get_variable_scope().dtype,\n )\n else:\n raise ValueError('Unknown Attention Type')\n return attention_mechanism", "def __init__(self, encoder_size, decoder_size, label_size):\n super(BilinearAttention, self).__init__()\n self.W = nn.Parameter(torch.zeros(label_size, decoder_size, encoder_size))\n self.u = nn.Parameter(torch.zeros(label_size, encoder_size))\n self.v = nn.Parameter(torch.zeros(label_size, decoder_size))\n self.b = nn.Parameter(torch.zeros(label_size))\n \n nn.init.xavier_uniform_(self.W)\n nn.init.xavier_uniform_(self.u)\n nn.init.xavier_uniform_(self.v)", "def __init__(self,\n num_units,\n line_memory,\n word_memory=None,\n soft_weight=None,\n hierarchy=True,\n line_memory_sequence_length=None,\n word_memory_sequence_length=None,\n scale=False,\n probability_fn=None,\n score_mask_value=float(\"-inf\"),\n name=\"CustomAttention\"):\n # For LuongAttention, we only transform the memory layer; thus\n # num_units **must** match expected the query depth.\n if probability_fn is None:\n probability_fn = nn_ops.softmax\n wrapped_probability_fn = lambda score: probability_fn(score)\n super(CustomAttention, self).__init__(\n query_layer=None,\n line_memory_layer=layers_core.Dense(\n num_units, name=\"line_memory_layer\", use_bias=False),\n line_memory=line_memory,\n word_memory=word_memory,\n probability_fn=wrapped_probability_fn,\n line_memory_sequence_length=line_memory_sequence_length,\n word_memory_sequence_length=word_memory_sequence_length,\n score_mask_value=score_mask_value,\n name=name)\n self._num_units = num_units\n self._scale = scale\n self._name = name\n self._hierarchy = hierarchy\n self._soft_weight = soft_weight", "def __init__(self, mbart_layer, config):\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([mbart_layer.self_attn.q_proj.weight, mbart_layer.self_attn.k_proj.weight, mbart_layer.self_attn.v_proj.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([mbart_layer.self_attn.q_proj.bias, mbart_layer.self_attn.k_proj.bias, mbart_layer.self_attn.v_proj.bias]))\n self.out_proj_weight = mbart_layer.self_attn.out_proj.weight\n self.out_proj_bias = mbart_layer.self_attn.out_proj.bias\n self.linear1_weight = mbart_layer.fc1.weight\n self.linear1_bias = mbart_layer.fc1.bias\n self.linear2_weight = mbart_layer.fc2.weight\n self.linear2_bias = mbart_layer.fc2.bias\n self.norm1_eps = mbart_layer.self_attn_layer_norm.eps\n self.norm1_weight = mbart_layer.self_attn_layer_norm.weight\n self.norm1_bias = mbart_layer.self_attn_layer_norm.bias\n self.norm2_eps = mbart_layer.final_layer_norm.eps\n self.norm2_weight = mbart_layer.final_layer_norm.weight\n self.norm2_bias = mbart_layer.final_layer_norm.bias\n self.num_heads = mbart_layer.self_attn.num_heads\n self.embed_dim = 
mbart_layer.self_attn.embed_dim\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()", "def forward_train(self, imgs, label, token_ids=None, segment_ids=None, input_mask=None, \n mlm_label=None, dvae_imgs=None, v_token_mask=None, hog_features=None, img_metas=None, **kwargs): \n # (batch_size, num_clips*num_crops, channel, num_segments, h, w) -> (batch_size*num_clips*num_crops, channel, num_segments, h, w)\n imgs = imgs.reshape((-1, ) + imgs.shape[2:]) \n if self.from_scratch:\n imgs = imgs / 255.0\n # text reshape: (batch_size, num_candidates, seq_length) -> (batch_size * num_candidates, seq_length)\n token_ids = token_ids.reshape((-1, ) + token_ids.shape[2:])\n text_input_mask = input_mask.reshape((-1, ) + input_mask.shape[2:])\n if mlm_label is not None:\n mlm_label = mlm_label.reshape((-1, ) + mlm_label.shape[2:])\n\n\n visual_token = self.extract_visual_feat(imgs) # b, d, T, h, w\n\n B, D, T, H, W = visual_token.shape\n losses = dict()\n # -------------- nce loss ------------------- #\n if hasattr(self, 'ssl_head'):\n input_ssl_ids = torch.where(mlm_label == -100, token_ids.clone(), mlm_label.clone())\n input_ssl_mask = text_input_mask.clone()\n text_only_out = self.text_backbone(input_ssl_ids, input_ssl_mask)\n # ------------ complete T -------------- #\n text_out_no_mask = text_only_out['last_hidden_state']\n visual_emb, text_emb = self.ssl_head(visual_token, text_out_no_mask, input_ssl_mask, input_ssl_ids)\n\n\n # ------------ complete V ---------------- #\n visual_token = visual_token.view(B, D, T, -1).permute(0, 2, 3, 1)\n\n\n # ---------- foward mask text input ---------- # \n text_out_with_mask = self.text_backbone(token_ids, text_input_mask)\n text_out_last_hidden_state = text_out_with_mask['last_hidden_state']\n\n # ---------- forward mask v input ------------ #\n visual_token_with_mask, v_mask = self.extract_visual_feat(imgs.clone(), v_token_mask) # b, d, T, h, w\n visual_token_mask = visual_token_with_mask.view(B, D, T, -1).permute(0, 2, 3, 1)\n \n v_fusion_output = self.multimodal_backbone(visual_token=visual_token_mask, text_input_mask=text_input_mask.clone(), text_input_embeds=text_out_no_mask.clone())\n \n t_fusion_output = self.multimodal_backbone(visual_token=visual_token, text_input_mask=text_input_mask, text_input_embeds=text_out_last_hidden_state)\n # for mlm #\n t_last_hidden_state = t_fusion_output['t_last_hidden_state']\n\n\n\n\n\n # ------------ MLM loss ------------ #\n\n if mlm_label is not None and self.mlm_head is not None:\n # we use mask text for MLM\n # because we doubt there will be miss interaction between wrong img-text pair \n # and the model not learn good relationship between vision and language\n # -------- forward masked text ----------- #\n mlm_prediction_score = self.mlm_head(t_last_hidden_state)\n \n if self.mlm_loss_func is not None:\n mlm_label_idx = torch.where(mlm_label.view(-1) != -100)\n mlm_prediction_mask_score = mlm_prediction_score.view(-1, self.text_vocab_size)[mlm_label_idx[0], :]\n mlm_label_mask = mlm_label.view(-1)[mlm_label_idx]\n mlm_loss = self.mlm_loss_func(mlm_prediction_mask_score, mlm_label_mask)\n else:\n mlm_loss = self.loss_func(mlm_prediction_score.view(-1, self.text_vocab_size), mlm_label.view(-1))\n losses['mlm_loss'] = mlm_loss\n\n\n # ------- Tri-modal alignment with mask sample and ranking --------- #\n if self.mlm_ssl_V_head is not None:\n mlm_visual_feat = v_fusion_output['t_last_hidden_state'][:, 0]\n mask_visual_recon_emb = self.mlm_ssl_V_head(mlm_visual_feat)\n 
mask_word_emb = self.ssl_head.forward_text(text_out_last_hidden_state) if self.use_Cmask else None\n loss_cvt_rank = self.ssl_loss(visual_emb, text_emb, mask_word_emb, mask_visual_recon_emb)\n losses.update(loss_cvt_rank)\n\n\n if self.symmetry_rank:\n mlm_word_feat = t_last_hidden_state[:, 0]\n mask_word_recon_emb = self.mlm_ssl_T_head(mlm_word_feat)\n\n mask_visual_emb = self.ssl_head.forward_vision(visual_token_with_mask) if self.use_Cmask else None\n \n loss_ctv_rank = self.ssl_loss(text_emb, visual_emb, mask_visual_emb, mask_word_recon_emb)\n loss_ctv_rank['v_nce_loss'] = loss_ctv_rank.pop('nce_loss')\n \n if self.ssl_loss.use_rank:\n loss_ctv_rank['rank_v_vm_loss'] = loss_ctv_rank.pop('rank_t_tm_loss')\n\n \n\n losses.update(loss_ctv_rank)\n\n\n\n return losses", "def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n num_frames_t=num_frames\n num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n feature_size = model_input.get_shape().as_list()[2]\n iterations=5#150\n attention_size=8\n if FLAGS.is_train: \n iterations=120\n model_input = utils.SampleRandomFrames(model_input[:,15:,:], num_frames-15-15,\n iterations)\n model_input=model_input+tf.random_normal(shape=tf.shape(model_input), mean=0.0, stddev=1e-3, dtype=tf.float32)\n\n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n\n video_attention = MultiAttentionLayers(1024,iterations,256,attention_size)#256\n audio_attention = MultiAttentionLayers(128,iterations,256/4,attention_size)#256/4\n\n model_input = slim.batch_norm(\n model_input,\n center=True,\n scale=True,\n is_training=True,\n scope=\"model_input_bn\")\n\n with tf.variable_scope(\"video_Attention\"):\n attention_video = video_attention.forward(model_input[:,:,0:1024]) \n with tf.variable_scope(\"audio_Attention\"):\n attention_audio = audio_attention.forward(model_input[:,:,1024:])\n\n pooled=tf.concat([attention_video,attention_audio],axis=1)\n #instance_att#tf.reduce_mean(pooledi,axis=1)\n\n print('pooled is',pooled)\n pooled=tf.reshape(tf.transpose(pooled,perm=[0,2,1]),[-1,1152])\n dr2 = tf.get_variable(\"dr2\",\n [feature_size,1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))\n pooled=tf.matmul(pooled,dr2)\n\n pooled = slim.batch_norm(\n pooled,\n center=True,\n scale=True,\n is_training=True,\n scope=\"pooled_bn\")\n\n gating_weights = tf.get_variable(\"gating_weights_2\",\n [1024, 1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(1024))) \n gates = tf.matmul(pooled, gating_weights) \n gates = slim.batch_norm(\n gates,\n center=True,\n scale=True,\n is_training=True,\n scope=\"gating_bn\")\n gates = tf.sigmoid(gates)\n pooled = tf.multiply(pooled,gates)\n\n results_temp=aggregated_model().create_model(\n model_input=pooled, vocab_size=vocab_size, **unused_params)\n results_temp['predictions']=tf.reduce_max(tf.reshape(results_temp['predictions'],[-1,attention_size,vocab_size]),axis=1)\n print(results_temp)\n return results_temp", "def example():\n n = 500\n cov_a = np.array([[3, 0], [0, 1]], dtype=np.dtype(float))\n cov_b = np.array([[1, 0], [0, 3]], dtype=np.dtype(float))\n mean_a = np.array([0.0, 0.0])\n mean_b = np.array([0.0, 0.0])\n\n target_model = {\n 'd': 2,\n 'parameters': [\n (mean_a, cov_a),\n (mean_b, cov_b),\n ],\n 'assignment': np.array([0, 0, 1, 1], dtype=np.dtype(int))\n }\n\n prior = {\n 'nu_0': 3,\n 'kappa_0': 1,\n 'mu_0': np.zeros(2),\n 'lambda_0': np.eye(2)\n }\n\n data_model = NormalInverseWishart(**prior)\n t = 
Teacher(target_model, data_model, 1.0, t_std=1, fast_niw=True)\n t.mh(n, burn=500, lag=20, plot_diagnostics=False)\n\n X_orig = np.vstack((np.random.multivariate_normal(mean_a, cov_a, n),\n np.random.multivariate_normal(mean_b, cov_b, n)))\n X_opt, _ = t.get_stacked_data()\n\n plt.figure(tight_layout=True, facecolor='white')\n plt.scatter(X_opt[:, 0], X_opt[:, 1], color='royalblue', alpha=.5,\n label='optimized')\n plt.scatter(X_orig[:, 0], X_orig[:, 1], color='crimson', alpha=.5,\n label='original')\n plt.legend(loc=0)\n plt.show()", "def create_quantizable_multihead_attention(module: torch.nn.MultiheadAttention) -> QuantizableMultiheadAttention:\n # inspect MHA if bias is required.\n bias = module.in_proj_bias is not None\n\n # if bias k/v parameter exist set quantizable MHA to create 3 separate bias tensors as expected.\n add_bias_kv = module.bias_k is not None and module.bias_v is not None\n\n q_MHA = QuantizableMultiheadAttention(embed_dim=module.embed_dim, num_heads=module.num_heads,\n dropout=module.dropout, bias=bias, add_bias_kv=add_bias_kv,\n add_zero_attn=module.add_zero_attn, kdim=module.kdim, vdim=module.vdim,\n batch_first=module.batch_first)\n\n # copy over weight and bias tensors\n with torch.no_grad():\n if module.in_proj_weight is not None:\n weights_q, weights_k, weights_v = torch.chunk(module.in_proj_weight.data, 3, dim=0)\n else:\n weights_q = module.q_proj_weight.data\n weights_k = module.k_proj_weight.data\n weights_v = module.v_proj_weight.data\n q_MHA.linear_Q.weight.copy_(weights_q)\n q_MHA.linear_K.weight.copy_(weights_k)\n q_MHA.linear_V.weight.copy_(weights_v)\n\n q_MHA.out_proj.weight.copy_(module.out_proj.weight.data)\n\n if bias:\n bias_q, bias_k, bias_v = torch.chunk(module.in_proj_bias.data, 3, dim=0)\n if add_bias_kv:\n bias_k = q_MHA.linear_K.bias.copy_(module.bias_k.data)\n bias_v = q_MHA.linear_V.bias.copy_(module.bias_v.data)\n q_MHA.linear_K.bias.copy_(bias_k)\n q_MHA.linear_V.bias.copy_(bias_v)\n q_MHA.linear_Q.bias.copy_(bias_q)\n\n q_MHA.out_proj.bias.copy_(module.out_proj.bias.data)\n\n return q_MHA", "def __init__(self, options):\r\n nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.basemodel = torchvision.models.resnet18(pretrained=True)\r\n self.options = options\r\n\r\n #label\r\n self.label_primary = nn.Linear(options['primary_dim'], options['proj_dim'])\r\n self.label_dual = nn.Linear(options['dual_dim'], options['proj_dim'])\r\n\r\n #classifer/regressor\r\n self.fc_primary = nn.Linear(512 + options['proj_dim'], options['primary_dim'])\r\n self.fc_dual = nn.Linear(512 + options['proj_dim'], options['dual_dim'])\r\n\r\n\r\n if self.options['fc'] == True:\r\n # Freeze all previous layers.\r\n for param in self.basemodel.parameters():\r\n param.requires_grad = False\r\n # Initialize the fc layers.\r\n nn.init.kaiming_normal_(self.fc_primary.weight.data)\r\n if self.fc_primary.bias is not None:\r\n nn.init.constant_(self.fc_primary.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.fc_dual.weight.data)\r\n if self.fc_dual.bias is not None:\r\n nn.init.constant_(self.fc_dual.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.label_primary.weight.data)\r\n if self.label_primary.bias is not None:\r\n nn.init.constant_(self.label_primary.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.label_dual.weight.data)\r\n if self.label_dual.bias is not None:\r\n nn.init.constant_(self.label_dual.bias.data, val=0)\r\n\r\n\r\n else:\r\n for param in self.basemodel.conv1.parameters():\r\n param.requires_grad = 
False\r\n for param in self.basemodel.bn1.parameters():\r\n param.requires_grad = False\r\n for param in self.basemodel.layer1.parameters():\r\n param.requires_grad = False\r\n #for param in self.basemodel.layer2.parameters():\r\n # param.requires_grad = False\r\n #for param in self.basemodel.layer3.parameters():\r\n # param.requires_grad = False\r", "def __init__(self, \n num_vars, \n num_hidden,\n training_inputs = None,\n algorithm = None,\n algorithm_dict = None,\n batch_size = None,\n use_momentum = None,\n W0= None, \n b0= None, \n bhid0 = None,\n zero_diag = True,\n symmetric = True,\n report_p_tilda =False,\n learn_biases = True,\n test_mode= False,\n training = True):\n \n self.num_vars = num_vars\n \n self.num_hidden = num_hidden\n \n self.batch_size = batch_size\n \n self.zero_diag = zero_diag\n \n self.algorithm = algorithm\n \n self.num_samples = 0\n \n self.num_u_gibbs = 0\n \n self.gibbs_steps = 0\n \n self.resample = False\n \n self.uniform = False\n \n self.mixture = False\n \n self.mix_params = []\n \n self.m_params = []\n \n self.mf_steps = 0\n \n self.alpha = 0\n \n self.learn_biases = learn_biases\n \n if isinstance(algorithm_dict, dict):\n \n for param in algorithm_dict.keys():\n \n if param == 'resample':\n \n self.resample = algorithm_dict[param]\n \n if param == 'mf_steps':\n \n self.mf_steps = algorithm_dict[param]\n \n if param == \"gibbs_steps\":\n \n self.gibbs_steps = algorithm_dict[param]\n \n if param == \"num_samples\":\n \n self.num_samples = algorithm_dict[param]\n \n if param == \"num_u_gibbs\":\n \n self.num_u_gibbs = algorithm_dict[param]\n \n if param == \"uniform\":\n \n self.uniform = algorithm_dict[param] \n \n if param == \"mixture\":\n \n self.mixture = algorithm_dict[param] \n \n if param == \"mix_params\":\n \n self.mix_params = algorithm_dict[param] \n \n if param == \"alpha\" and algorithm_dict[param] != None:\n #### alpha defines transition rate from\n #### uniform to mean-field distribution\n self.alpha = algorithm_dict[param] \n \n self.m_params = (1-self.alpha)*0.5*np.ones([1,self.num_vars])+\\\n self.alpha*np.mean(training_inputs,0)\n \n self.use_momentum = use_momentum\n \n self.report_p_tilda = report_p_tilda\n \n self.side = int(np.sqrt(self.num_vars))\n \n self.np_rand_gen = np.random.RandomState(1234)\n \n self.theano_rand_gen =\\\n theano.sandbox.rng_mrg.MRG_RandomStreams(self.np_rand_gen.randint(2**30))\n \n #self.theano_rand_gen =\\\n #T.shared_randomstreams.RandomStreams(self.np_rand_gen.randint(2**30))\n \n theano.config.exception_verbosity = 'high'\n \n self.node_indices = \\\n theano.shared(np.arange(self.num_vars), name=\"node_indices\")\n \n self.x = T.matrix('x')\n \n self.x_tilda = T.matrix('x_tilda')\n \n self.sampler_theta = T.matrix('sampler_theta')\n \n self.symmetric = symmetric\n \n if training:\n \n if self.num_hidden ==0:\n \n self.num_x2 = self.num_vars\n \n elif self.num_hidden > 0 :\n \n self.num_x2 = self.num_hidden\n \n self.updates = OrderedDict()\n \n self.N_train = training_inputs.shape[0]\n \n self.train_inputs = theano.shared(np.asarray(training_inputs,\n dtype=theano.config.floatX),\n borrow= True)\n \n self.learning_rate = T.dscalar('learning_rate')\n \n if self.mixture:\n \n print(\"Importance distribution was specified as mixture\"+\\\n \" of Bernoulli products\")\n \n if self.mix_params == []:\n print(\"Error: parameters defining mixture means were\"+\\\n \" not provided\")\n sys.exit()\n \n self.set_mixture_means(inputs = training_inputs)\n \n if use_momentum:\n \n print(\"Will add momentum term to 
gradient computations\")\n \n self.momentum = T.dscalar('learning_rate')\n \n self.grad_vec = {}\n \n self.grad_vec['W'] = theano.shared(np.zeros([self.num_vars, self.num_x2],\n dtype = theano.config.floatX), name = 'W_momentum', borrow = True)\n \n if self.num_hidden > 0:\n \n self.grad_vec['bhid'] = theano.shared(np.zeros([self.num_x2],\n dtype = theano.config.floatX), name = 'b_momentum', borrow = True)\n \n self.grad_vec['b'] = theano.shared(np.zeros([self.num_vars],\n dtype = theano.config.floatX), name = 'b_momentum', borrow = True)\n \n if test_mode:\n \n b_init =self.np_rand_gen.uniform(0,1, num_vars)\n \n W_init =self.np_rand_gen.uniform(0,1, size = (num_vars, num_vars))\n \n # also tested ones\n # b_init = np.ones(num_vars)\n \n # W_init = np.ones([num_vars, num_vars])\n \n self.b_init= np.asarray(b_init, dtype = theano.config.floatX)\n \n self.W_init= np.asarray(W_init, dtype = theano.config.floatX)\n \n self.b = theano.shared(self.b_init, name='b', borrow = False)\n \n self.W = theano.shared(self.W_init, name='W', borrow = False)\n \n print(\"Initialized with test mode\")\n \n else:\n \n if W0 is None:\n \n if self.num_hidden > 0:\n \n W0_init =\\\n self.np_rand_gen.uniform(\n -4*np.sqrt(6.0/(self.num_vars+self.num_hidden)),\\\n 4*np.sqrt(6.0 /(self.num_vars + self.num_hidden)), \n size = (num_vars, self.num_hidden)\n )\n \n W0 = np.asarray(W0_init, dtype = theano.config.floatX) \n \n if self.num_hidden == 0:\n \n # different W initializations: \n \n # W0_init =\\\n # self.np_rand_gen.uniform(-np.sqrt(3.0/(num_vars)),\\\n # np.sqrt(3.0 / (num_vars)), size = (num_vars, num_vars))\n \n # W0_init =\\\n # self.np_rand_gen.uniform(-0.00000001,\\\n # 0.00000001, size = (num_vars, num_vars))\n \n W0_init = 0.00000001*\\\n self.np_rand_gen.normal(size = (num_vars, self.num_x2)) \n \n W0 = np.asarray(W0_init, dtype = theano.config.floatX)\n \n if self.symmetric:\n \n W0 = (W0 + np.transpose(W0))/2.0\n \n if self.zero_diag:\n \n W0 = W0 - np.diag(np.diag(W0))\n \n self.W = theano.shared(value= W0, name='W', borrow=True)\n \n if self.num_hidden == 0:\n \n test_W = self.W.get_value() \n \n assert sum(np.diag(test_W)) == 0.0\n \n assert (test_W == np.transpose(test_W)).all() == True\n \n else:\n print(\"W is initialized with provided array\")\n self.W = theano.shared(value= W0, name='W', borrow=True)\n \n if b0 is None:\n \n bias_init = np.zeros(num_vars, dtype = theano.config.floatX)\n \n self.b = theano.shared(value= bias_init, name='b', borrow=True)\n \n else:\n print(\"b vector is initialized with provided vector\")\n self.b = theano.shared(value= b0, name='b', borrow=True)\n \n if bhid0 is None and self.num_hidden > 0:\n \n hbias_init = np.zeros(self.num_hidden, dtype = theano.config.floatX)\n \n self.bhid = theano.shared(value= hbias_init, name='bhid', borrow=True)\n \n elif (bhid0 is not None) and (self.num_hidden > 0):\n print(\"bhid vector is initialized with provided vector\") \n self.bhid = theano.shared(value= bhid0, name='bhid', borrow=True)\n \n self.theta = [self.W, self.b]\n \n if self.num_hidden > 0 :\n \n self.theta.append(self.bhid)\n \n self.train_set = set(range(self.N_train))\n \n self.minibatch_set = T.ivector('minibatch_set')\n \n self.sample_set = T.ivector('sample_set')\n \n if \"CD\" in self.algorithm and self.num_hidden ==0:\n \n self.x_gibbs= theano.shared(np.ones([self.batch_size,self.num_vars],\n dtype=theano.config.floatX),\n borrow = True, name= \"x_gibbs\")\n \n if \"CD\" in self.algorithm and self.num_hidden > 0:\n \n self.persistent_gibbs =\\\n 
theano.shared(np.ones([self.batch_size,self.num_hidden],\n dtype=theano.config.floatX),\n borrow = True, \n name= \"persistent_gibbs\")\n \n if \"CSS\" in self.algorithm and self.mf_steps > 0:\n \n init_mf_vis = self.np_rand_gen.uniform(0, \n 1, \n size =(self.num_vars,1))\n \n init_mf_vis = np.asarray(init_mf_vis, dtype = theano.config.floatX)\n \n self.mf_vis_p = theano.shared(init_mf_vis, \n name= \"mf_vis_p\", \n borrow= True)\n \n if self.num_hidden > 0:\n \n init_mf_hid = \\\n self.np_rand_gen.uniform(0, 1, size =(self.num_hidden,1))\n \n init_mf_hid = np.asarray(init_mf_hid, \n dtype = theano.config.floatX)\n \n self.mf_hid_p = theano.shared(init_mf_hid, \n name= \"mf_hid_p\", \n borrow= True)\n \n elif \"CSS\" in self.algorithm and self.gibbs_steps > 0: \n \n if self.num_hidden ==0: \n self.x_gibbs= theano.shared(np.ones([self.batch_size,self.num_vars],\n dtype=theano.config.floatX),\n borrow = True, name= \"x_gibbs\")", "def __init__(self, sigma=0.4, alpha=1.0, reset_always=True):\n super().__init__()\n self.sigma = sigma\n self.alpha = alpha\n self.reset_always = reset_always\n self.guided_attn_masks = None\n self.masks = None", "def forward(self, input):\n x = self.emb(input)\n output = self.attention(x)\n # print(\"Idggfghrf \", output.size())\n return output", "def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, direct=False, supervised=True):\n super(SpGAT, self).__init__()\n self.dropout = dropout\n\n self.attentions = [SpGraphAttentionLayer(nfeat,\n nhid,\n dropout=dropout,\n alpha=alpha,\n concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(nhid * nheads,\n nhid,\n dropout=dropout,\n alpha=alpha,\n concat=False)\n self.fm = FMLayer(nfeat, nhid)\n self.supervised = supervised\n if self.supervised:\n n_output = nclass\n else:\n n_output = nhid\n\n self.final_linear = nn.Linear(2 * nhid, n_output)\n self.final_linear_single = nn.Linear(nhid, n_output)\n # self.final_linear = nn.Linear(nhid, nclass)\n self.pa_attn = AttentionalFactorizationMachine(nhid, nhid, dropouts=[0.2, 0.2])\n self.direct = direct", "def exec_attention(self,curr_step): \n\n assert(self.curr_step_idx > 0 and self.dlist is not None), \"Step Error: Must call init before combine\" \n \n detectType = curr_step[\"detectionNetwork\"]\n paramsFile = curr_step[\"paramsFile\"]\n funclist = curr_step[\"funclist\"]\n\n #verify raw data & dlist\n # self.B_VER(self.sess_path, self.dlist)\n raw_datadir = self.sess_path\n dest_datadir = self.sess_path \n\n model_dict = {\"detectType\": detectType, \"paramsFile\" : paramsFile} \n\n for i, folder in enumerate(self.dlist):\n flist = funclist[i]\n self.data_utils.DETECT(raw_datadir, folder, dest_datadir, model_dict, flist=[], preview=False)\n self.default_vis(curr_step)", "def visualize_attention(attn_dict, target_word, image):\n\n\tw_headmap = 5\n\th_headmap = w_headmap\n\n\tw_image = 1\n\tn_heads = 4\n\n\tt_length = w_headmap * n_heads + w_image\n\n\tw_ratio = ((t_length - w_image) / n_heads) / t_length\n\th_ratio = 0.7\n\n\tseaborn.set(font_scale=2.8)\n\n\tdraw_encoder_self_attention(attn_dict, image, w_ratio, w_headmap, h_headmap, h_ratio, n_heads, start_layer=0, end_layer=6, step_size=2)\n\tdraw_decoder_self_attention(image, attn_dict, target_word, n_heads, h_headmap, w_headmap, h_ratio)", "def do_classify(img,mask,n_sigmas,multichannel,intensity,edges,texture,sigma_min,sigma_max, downsample_value):\n if np.ndim(img)==3:\n 
features = extract_features(\n img,\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n else:\n features = extract_features(\n np.dstack((img,img,img)),\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n\n if mask is None:\n raise ValueError(\"If no classifier clf is passed, you must specify a mask.\")\n training_data = features[:, mask > 0].T\n\n training_data = memmap_feats(training_data)\n\n training_labels = mask[mask > 0].ravel()\n\n training_data = training_data[::downsample_value]\n training_labels = training_labels[::downsample_value]\n\n lim_samples = 100000 #200000\n\n if training_data.shape[0]>lim_samples:\n logging.info('Number of samples exceeds %i'% lim_samples)\n ind = np.round(np.linspace(0,training_data.shape[0]-1,lim_samples)).astype('int')\n training_data = training_data[ind,:]\n training_labels = training_labels[ind]\n logging.info('Samples have been subsampled')\n logging.info('Number of samples in training data: %i' % (training_data.shape[0]))\n print(training_data.shape)\n\n clf = make_pipeline(\n StandardScaler(),\n MLPClassifier(\n solver='adam', alpha=1, random_state=1, max_iter=2000,\n early_stopping=True, hidden_layer_sizes=[100, 60],\n ))\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Initializing MLP model')\n\n clf.fit(training_data, training_labels)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('MLP model fit to data')\n\n del training_data, training_labels\n\n logging.info('Create and memory map model input data')\n\n data = features[:, mask == 0].T\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n data = memmap_feats(data)\n logging.info('Memory mapped model input data')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n labels = clf.predict(data)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Model used on data to estimate labels')\n\n if mask is None:\n result = labels.reshape(img.shape[:2])\n result2 = result.copy()\n else:\n result = np.copy(mask)#+1\n result[mask == 0] = labels\n del labels, mask\n result2 = result.copy()\n del result\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('RF feature extraction and model fitting complete')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return result2", "def show_sent_attention(self, x):\n att_layer = self.model_sent.get_layer('sent_attention')\n prev_tensor = att_layer.input\n\n dummy_layer = Lambda(\n lambda x: att_layer._get_attention_weights(x)\n )(prev_tensor)\n\n return Model(self.model_sent.input, dummy_layer).predict(x)", "def __init__(self, bert_layer, config):\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([bert_layer.attention.q_lin.weight, bert_layer.attention.k_lin.weight, bert_layer.attention.v_lin.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([bert_layer.attention.q_lin.bias, bert_layer.attention.k_lin.bias, bert_layer.attention.v_lin.bias]))\n self.out_proj_weight = bert_layer.attention.out_lin.weight\n self.out_proj_bias = bert_layer.attention.out_lin.bias\n self.linear1_weight = bert_layer.ffn.lin1.weight\n self.linear1_bias = bert_layer.ffn.lin1.bias\n self.linear2_weight = bert_layer.ffn.lin2.weight\n self.linear2_bias = bert_layer.ffn.lin2.bias\n 
self.norm1_eps = bert_layer.sa_layer_norm.eps\n self.norm1_weight = bert_layer.sa_layer_norm.weight\n self.norm1_bias = bert_layer.sa_layer_norm.bias\n self.norm2_eps = bert_layer.output_layer_norm.eps\n self.norm2_weight = bert_layer.output_layer_norm.weight\n self.norm2_bias = bert_layer.output_layer_norm.bias\n self.num_heads = bert_layer.attention.n_heads\n self.embed_dim = bert_layer.attention.dim\n self.is_last_layer = False\n self.validate_bettertransformer()", "def generator():\n\n model = Sequential()\n\n in_shape = 100\n\n depth = 256\n\n model.add(Dense(depth * 7 * 7, input_shape=(in_shape,)))\n model.add(BatchNormalization(momentum=0.9)) # add the momentum\n # model.add(Activation('relu')) # pass the vector through a relu\n model.add(LeakyReLU(alpha=0.2))\n\n model.add(Reshape((7, 7, depth))) # reshape to depth number of 7x7 images\n model.add(Dropout(0.4))\n\n model.add(UpSampling2D())\n model.add(Conv2DTranspose(int(depth / 2), 5, padding='same'))\n model.add(BatchNormalization(momentum=0.9))\n # model.add(Activation('relu'))\n model.add(LeakyReLU(alpha=0.2))\n\n model.add(UpSampling2D())\n model.add(Conv2DTranspose(int(depth / 4), 5, padding='same'))\n model.add(BatchNormalization(momentum=0.9))\n # model.add(Activation('relu'))\n model.add(LeakyReLU(alpha=0.2))\n\n model.add(Conv2DTranspose(int(depth / 8), 5, padding='same'))\n model.add(BatchNormalization(momentum=0.9))\n # model.add(Activation('relu'))\n model.add(LeakyReLU(alpha=0.2))\n\n model.add(Conv2DTranspose(1, 5, padding='same'))\n model.add(Activation('sigmoid'))\n\n # model.summary()\n\n noise = Input(shape=(in_shape,))\n img = model(noise)\n\n return Model(noise, img)\n\n # return model", "def __init__(self, model, batch_size=1, confidence=CONFIDENCE,\n targeted=TARGETED, learning_rate=LEARNING_RATE,\n binary_search_steps=BINARY_SEARCH_STEPS, max_iterations=MAX_ITERATIONS, print_every=100, early_stop_iters=0,\n abort_early=ABORT_EARLY,\n initial_c=INITIAL_C,\n use_log=True, use_tanh=True, use_resize=False, adam_beta1=0.9, adam_beta2=0.999, reset_adam_after_found=False,\n solver=\"adam\", save_ckpts=\"\", load_checkpoint=\"\", start_iter=0,\n init_size=32, use_importance=False, device=\"cuda\"):\n\n if solver != \"fake_zero\":\n torch.set_grad_enabled(False)\n\n self.image_size, self.num_channels, num_labels = model.image_size, model.num_channels, model.num_labels\n self.model = model\n self.TARGETED = targeted\n self.LEARNING_RATE = learning_rate\n self.MAX_ITERATIONS = max_iterations\n self.print_every = print_every\n self.early_stop_iters = early_stop_iters if early_stop_iters != 0 else max_iterations // 10\n print(\"early stop:\", self.early_stop_iters)\n self.BINARY_SEARCH_STEPS = binary_search_steps\n self.ABORT_EARLY = abort_early\n self.CONFIDENCE = confidence\n self.initial_c = initial_c\n self.start_iter = start_iter\n self.batch_size = batch_size\n self.num_channels = self.num_channels\n self.resize_init_size = init_size\n self.use_importance = use_importance\n if use_resize:\n self.small_x = self.resize_init_size\n self.small_y = self.resize_init_size\n else:\n self.small_x = self.image_size\n self.small_y = self.image_size\n\n self.use_tanh = use_tanh\n self.use_resize = use_resize\n self.save_ckpts = save_ckpts\n if save_ckpts:\n os.system(\"mkdir -p {}\".format(save_ckpts))\n\n self.repeat = binary_search_steps >= 10\n self.device = device\n\n # each batch has a different modifier value (see below) to evaluate\n # small_shape = (None,self.small_x,self.small_y,num_channels)\n\n single_shape = 
(self.num_channels, self.image_size, self.image_size)\n small_single_shape = (self.num_channels, self.small_x, self.small_y)\n\n # the variable we're going to optimize over\n # support multiple batches\n # support any size image, will be resized to model native size\n\n # the real variable, initialized to 0\n self.load_checkpoint = load_checkpoint\n if load_checkpoint:\n # if checkpoint is incorrect reshape will fail\n print(\"Using checkpint\", load_checkpoint)\n self.real_modifier = torch.load(load_checkpoint).reshape(\n (1,) + small_single_shape, map_location=torch.device(device))\n else:\n self.real_modifier = torch.zeros(\n (1,) + small_single_shape, dtype=torch.float32, device=self.device)\n\n if solver == \"fake_zero\":\n self.real_modifier.requires_grad = True\n # self.real_modifier = np.random.randn(image_size * image_size * num_channels).astype(torch.float32).reshape((1,) + single_shape)\n # self.real_modifier /= np.linalg.norm(self.real_modifier)\n # these are variables to be more efficient in sending data to tf\n # we only work on 1 image at once; the batch is for evaluation loss at different modifiers\n self.true_img = torch.zeros(single_shape, device=self.device)\n self.true_label_1hot = torch.zeros(num_labels, device=self.device)\n self.c = 0.0\n\n # prepare the list of all valid variables\n var_size = self.small_x * self.small_y * self.num_channels\n self.use_var_len = var_size\n self.var_list = torch.tensor(\n range(0, self.use_var_len), dtype=torch.int64, device=self.device)\n self.used_var_list = torch.zeros(\n var_size, dtype=torch.int64, device=self.device)\n self.sample_prob = torch.ones(\n var_size, dtype=torch.float32, device=self.device) / var_size\n\n # upper and lower bounds for the modifier\n self.modifier_up = torch.zeros(\n var_size, dtype=torch.float32, device=self.device)\n self.modifier_down = torch.zeros(\n var_size, dtype=torch.float32, device=self.device)\n\n # random permutation for coordinate update\n self.perm = torch.randperm(var_size)\n self.perm_index = 0\n\n # ADAM status\n self.mt = torch.zeros(\n var_size, dtype=torch.float32, device=self.device)\n self.vt = torch.zeros(\n var_size, dtype=torch.float32, device=self.device)\n # self.beta1 = 0.8\n # self.beta2 = 0.99\n self.beta1 = adam_beta1\n self.beta2 = adam_beta2\n self.reset_adam_after_found = reset_adam_after_found\n self.adam_epoch = torch.ones(\n var_size, dtype=torch.int64, device=self.device)\n self.stage = 0\n # variables used during optimization process\n self.grad = torch.zeros(\n batch_size, dtype=torch.float32, device=self.device)\n self.hess = torch.zeros(\n batch_size, dtype=torch.float32, device=self.device)\n # compile numba function\n # self.coordinate_ADAM_numba = jit(coordinate_ADAM, nopython = True)\n # self.coordinate_ADAM_numba.recompile()\n # print(self.coordinate_ADAM_numba.inspect_llvm())\n # np.set_printoptions(threshold=np.nan)\n # set solver\n solver = solver.lower()\n self.solver_name = solver\n if solver == \"adam\":\n self.solver = coordinate_ADAM\n if solver == \"adam_torch\":\n self.solver = coordinate_ADAM_torch\n elif solver == \"newton\":\n self.solver = coordinate_Newton\n elif solver == \"adam_newton\":\n self.solver = coordinate_Newton_ADAM\n elif solver != \"fake_zero\":\n print(\"unknown solver\", solver)\n self.solver = coordinate_ADAM\n print(\"Using\", solver, \"solver\")", "def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n super(GAT, self).__init__()\n self.dropout = dropout\n self.xent = nn.CrossEntropyLoss()\n\n self.attentions = 
[GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)", "def __init__(self, in_classes=1, channelscale=64):\n super(DetectPatchAndSegm, self).__init__()\n self.unetvgg1 = UNetSimple(in_classes=in_classes, channelscale=64, out_classes=2)#UNetVgg()\n self.unetvgg2 = UNetSimple(in_classes=3, channelscale=128, out_classes=3)#in is 2 patches and original image\n self.sft = nn.Softmax2d()", "def attention_module_multi_head(roi_feat, position_embedding,\n nongt_dim, fc_dim, feat_dim,\n dim=(1024, 1024, 1024),\n group=16, index=1):\n dim_group = (dim[0] / group, dim[1] / group, dim[2] / group)\n nongt_roi_feat = mx.symbol.slice_axis(data=roi_feat, axis=0, begin=0, end=nongt_dim)\n # [num_rois * nongt_dim, emb_dim]\n position_embedding_reshape = mx.sym.Reshape(position_embedding, shape=(-3, -2))\n # position_feat_1, [num_rois * nongt_dim, fc_dim]\n position_feat_1 = mx.sym.FullyConnected(name='pair_pos_fc1_' + str(index),\n data=position_embedding_reshape,\n num_hidden=fc_dim)\n position_feat_1_relu = mx.sym.Activation(data=position_feat_1, act_type='relu')\n # aff_weight, [num_rois, nongt_dim, fc_dim]\n aff_weight = mx.sym.Reshape(position_feat_1_relu, shape=(-1, nongt_dim, fc_dim))\n # aff_weight, [num_rois, fc_dim, nongt_dim]\n aff_weight = mx.sym.transpose(aff_weight, axes=(0, 2, 1))\n\n # multi head\n assert dim[0] == dim[1], 'Matrix multiply requires same dimensions!'\n q_data = mx.sym.FullyConnected(name='query_' + str(index),\n data=roi_feat,\n num_hidden=dim[0])\n q_data_batch = mx.sym.Reshape(q_data, shape=(-1, group, dim_group[0]))\n q_data_batch = mx.sym.transpose(q_data_batch, axes=(1, 0, 2))\n k_data = mx.symbol.FullyConnected(name='key_' + str(index),\n data=nongt_roi_feat,\n num_hidden=dim[1])\n k_data_batch = mx.sym.Reshape(k_data, shape=(-1, group, dim_group[1]))\n k_data_batch = mx.sym.transpose(k_data_batch, axes=(1, 0, 2))\n v_data = nongt_roi_feat\n # v_data = mx.symbol.FullyConnected(name='value_'+str(index)+'_'+str(gid), data=roi_feat, num_hidden=dim_group[2])\n aff = mx.symbol.batch_dot(lhs=q_data_batch, rhs=k_data_batch, transpose_a=False, transpose_b=True)\n # aff_scale, [group, num_rois, nongt_dim]\n aff_scale = (1.0 / math.sqrt(float(dim_group[1]))) * aff\n aff_scale = mx.sym.transpose(aff_scale, axes=(1, 0, 2))\n\n assert fc_dim == group, 'fc_dim != group'\n # weighted_aff, [num_rois, fc_dim, nongt_dim]\n weighted_aff = mx.sym.log(mx.sym.maximum(left=aff_weight, right=1e-6)) + aff_scale\n aff_softmax = mx.symbol.softmax(data=weighted_aff, axis=2, name='softmax_' + str(index))\n # [num_rois * fc_dim, nongt_dim]\n aff_softmax_reshape = mx.sym.Reshape(aff_softmax, shape=(-3, -2))\n # output_t, [num_rois * fc_dim, feat_dim]\n output_t = mx.symbol.dot(lhs=aff_softmax_reshape, rhs=v_data)\n # output_t, [num_rois, fc_dim * feat_dim, 1, 1]\n output_t = mx.sym.Reshape(output_t, shape=(-1, fc_dim * feat_dim, 1, 1))\n # linear_out, [num_rois, dim[2], 1, 1]\n linear_out = mx.symbol.Convolution(name='linear_out_' + str(index), data=output_t,\n kernel=(1, 1), num_filter=dim[2], num_group=fc_dim)\n output = mx.sym.Reshape(linear_out, shape=(0, 0))\n return output", "def front_column_model_p_gain():", "def _build(self):\n if self.attn:\n self.Attn = AttentionNet(self.dim_b1, channels=self.channels, name='Attn')\n self.predsb1 = 
self.Attn(self.xb1, is_training=self.is_training)\n self.predsb2 = self.Attn(self.xb2, is_training=self.is_training, reuse=True)\n #TODO: generators want to make their synthetics look like b1/b2 to attn model\n\n self.loss_attn = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predsb1, labels=tf.zeros_like(self.predsb1)))\n self.loss_attn += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predsb2, labels=tf.ones_like(self.predsb2)))\n\n self.attnb1 = tf.gradients(self.loss_attn, self.xb1)[0]\n self.attnb2 = tf.gradients(self.loss_attn, self.xb2)[0]\n\n self.attnb1 = tf.abs(self.attnb1)\n self.attnb1 = self.attnb1 / tf.reduce_sum(self.attnb1, axis=1, keep_dims=True)\n self.attnb1 = self.attnb1 / tf.reduce_max(self.attnb1, axis=1, keep_dims=True)\n\n self.attnb2 = tf.abs(self.attnb2)\n self.attnb2 = self.attnb2 / tf.reduce_sum(self.attnb2, axis=1, keep_dims=True)\n self.attnb2 = self.attnb2 / tf.reduce_max(self.attnb2, axis=1, keep_dims=True)\n\n self.attnb1 = nameop(self.attnb1, 'attnb1')\n self.attnb2 = nameop(self.attnb2, 'attnb2')\n\n self.G12 = GeneratorResnet(self.dim_b1, self.dim_b2, channels=self.channels, name='G12')\n self.Gb2 = self.G12(self.xb1, is_training=self.is_training)\n self.Gb2 = nameop(self.Gb2, 'Gb2')\n\n self.G21 = GeneratorResnet(self.dim_b2, self.dim_b1, channels=self.channels, name='G21')\n self.Gb1 = self.G21(self.xb2, is_training=self.is_training)\n self.Gb1 = nameop(self.Gb1, 'Gb1')\n\n\n self.Gb2_reconstructed = self.G12(self.Gb1, is_training=self.is_training, reuse=True)\n self.Gb1_reconstructed = self.G21(self.Gb2, is_training=self.is_training, reuse=True)\n\n self.Gb1_reconstructed = nameop(self.Gb1_reconstructed, 'xb1_reconstructed')\n self.Gb2_reconstructed = nameop(self.Gb2_reconstructed, 'xb2_reconstructed')\n\n self.D1 = Discriminator(self.dim_b1, 1, channels=self.channels, name='D1')\n self.D2 = Discriminator(self.dim_b2, 1, channels=self.channels, name='D2')\n\n self.D1_probs_z = self.D1(self.xb1, is_training=self.is_training)\n self.D1_probs_G = self.D1(self.Gb1, is_training=self.is_training, reuse=True)\n self.D1_probs_z = nameop(self.D1_probs_z, 'D1_probs_z')\n self.D1_probs_G = nameop(self.D1_probs_G, 'D1_probs_G')\n\n self.D2_probs_z = self.D2(self.xb2, is_training=self.is_training)\n self.D2_probs_G = self.D2(self.Gb2, is_training=self.is_training, reuse=True)\n self.D2_probs_z = nameop(self.D2_probs_z, 'D2_probs_z')\n self.D2_probs_G = nameop(self.D2_probs_G, 'D2_probs_G')\n\n self._build_loss()\n\n self._build_optimization()", "def __init__(self,nback=1,ntokens_pm=2,ntokens_og=3,stimdim=2,seed=99):\n np.random.seed(seed)\n tr.manual_seed(seed)\n self.nback = nback\n # embedding\n self.ntokens_pm = ntokens_pm\n self.ntokens_og = ntokens_og\n self.stimdim = stimdim\n # emat\n self.randomize_emat()\n return None", "def bias_prior(self):", "def process_0(self):\n raw_data = self.pull_data(self.sub_folder)\n\n prepped_data = self._prep_data(raw_data)\n\n print(len(prepped_data))\n\n\n gmm = GaussianMixture(5)\n\n gmm.fit(prepped_data)\n\n return gmm.means_", "def AutoClassImpute(data,cellwise_norm=True,log1p=True,encoder_layer_size=[256,128],dropout_rate=0,epochs=300,classifier_weight=0.3,\n num_cluster=[4,5,6],reg_ae=0.001,reg_cf=0.001,batch_size=32,verbose=0,truelabel=[],\n npc=15,es=30,lr=15):\n t1 = time.time()\n AC = AutoClass()\n if cellwise_norm:\n libs = data.sum(axis = 1)\n norm_fact = np.diag(np.median(libs)/libs)\n data = np.dot(norm_fact,data)\n \n if log1p:\n data = np.log2(data + 1.) 
\n \n \n AC.set_input_data(data)\n AC.set_dropout_rate(dropout_rate)\n AC.set_epochs(epochs)\n AC.set_encoder_layer_size(encoder_layer_size)\n AC.set_batch_size(batch_size)\n AC.set_verbose(verbose)\n AC.set_npc(npc)\n AC.set_early_stopping(es)\n AC.set_reduce_lr(lr)\n AC.set_reg_ae(reg_ae)\n AC.set_reg_cf(reg_cf)\n AC.set_classifier_weight(classifier_weight)\n \n\n \n ncell = AC.ncell\n ngene = AC.ngene\n print('{} cells and {} genes'.format(ncell,ngene))\n ACs = []\n if classifier_weight == 0:\n print('no classifier layer')\n AC.create_model()\n AC.run_model()\n ACs.append(AC)\n imps = AC.imp\n else:\n if len(truelabel)>0:\n print('use true label')\n AC.set_truelabel(truelabel)\n AC.create_model()\n AC.run_model()\n ACs.append(AC)\n imps = AC.imp\n else: \n imps = np.zeros((ncell,ngene))\n for n_cluster in num_cluster:\n print('n_cluster = {}'.format(n_cluster))\n AC.set_n_cluster(n_cluster)\n AC.cluster()\n AC.create_model()\n AC.run_model()\n imps = imps + AC.imp\n ACs.append(AC)\n imps = imps / len(num_cluster)\n print('escape time is: {}'.format(time.time()-t1))\n return imps, ACs", "def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n super(GAT, self).__init__()\n self.dropout = dropout\n self.outc=nclass\n self.FC=nn.Parameter(torch.zeros(size=(nhid*nheads, self.outc)))\n nn.init.xavier_uniform_(self.FC.data, gain=1.414)\n\n self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n # self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)", "def train_supervised_modified(labelled_sequences, extra_transition, estimator=None):\n\n # default to the MLE estimate\n if estimator is None:\n estimator = lambda fdist, bins: MLEProbDist(fdist)\n\n # count occurrences of starting states, transitions out of each state\n # and output symbols observed in each state\n known_symbols = []\n known_states = []\n\n starting = FreqDist()\n transitions = ConditionalFreqDist()\n outputs = ConditionalFreqDist()\n for sequence in labelled_sequences:\n lasts = None\n for token in sequence:\n state = token[0]\n symbol = token[1]\n if lasts is None:\n starting[state] += 1\n else:\n transitions[lasts][state] += 1\n outputs[state][symbol] += 1\n lasts = state\n\n # update the state and symbol lists\n if state not in known_states:\n known_states.append(state)\n\n if symbol not in known_symbols:\n known_symbols.append(symbol)\n\n extra_sequences = extra_text_import()\n for sequence in extra_sequences:\n lasts = None\n for token in sequence:\n state = token\n if lasts is None:\n starting[state] += 1\n else:\n transitions[lasts][state] += 1\n lasts = state\n\n # update the state and symbol lists\n if state not in known_states:\n known_states.append(state)\n\n\n\n # create probability distributions (with smoothing)\n N = len(known_states)\n # print(\"known_states\", known_states)\n # print(\"len known\")\n # print(N)\n pi = estimator(starting, N)\n A = ConditionalProbDist(transitions, estimator, N)\n B = ConditionalProbDist(outputs, estimator, len(known_symbols))\n\n return hmm.HiddenMarkovModelTagger(known_states, known_symbols, A, B, pi)", "def init_weights_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n init.kaiming_normal_(m.weight.data, a=0, 
mode='fan_in')", "def __init__(self, cfg, momentum=False):\n\n super().__init__()\n self.ent_output_size = cfg.ent_output_size\n self.span_batch_size = cfg.span_batch_size\n self.position_embedding_dims = cfg.position_embedding_dims\n self.att_size = cfg.att_size\n self.momentum = momentum\n self.activation = nn.GELU()\n self.device = cfg.device\n\n self.bert_encoder = BertEncoder(bert_model_name=cfg.bert_model_name,\n trainable=cfg.fine_tune,\n output_size=cfg.bert_output_size,\n activation=self.activation)\n\n self.entity_span_extractor = CNNSpanExtractor(\n input_size=self.bert_encoder.get_output_dims(),\n num_filters=cfg.entity_cnn_output_channels,\n ngram_filter_sizes=cfg.entity_cnn_kernel_sizes,\n dropout=cfg.dropout)\n\n if self.ent_output_size > 0:\n self.ent2hidden = BertLinear(input_size=self.entity_span_extractor.get_output_dims(),\n output_size=self.ent_output_size,\n activation=self.activation,\n dropout=cfg.dropout)\n else:\n self.ent_output_size = self.entity_span_extractor.get_output_dims()\n self.ent2hidden = lambda x: x\n\n self.entity_span_mlp = BertLinear(input_size=self.ent_output_size,\n output_size=self.ent_output_size,\n activation=self.activation,\n dropout=cfg.dropout)\n self.entity_span_decoder = VanillaSoftmaxDecoder(hidden_size=self.ent_output_size,\n label_size=6)\n\n self.global_position_embedding = nn.Embedding(150, 200)\n self.global_position_embedding.weight.data.normal_(mean=0.0, std=0.02)\n self.masked_token_mlp = BertLinear(input_size=self.bert_encoder.get_output_dims() + 200,\n output_size=self.bert_encoder.get_output_dims(),\n activation=self.activation,\n dropout=cfg.dropout)\n self.masked_token_decoder = nn.Linear(self.bert_encoder.get_output_dims(),\n 28996,\n bias=False)\n self.masked_token_decoder.weight.data.normal_(mean=0.0, std=0.02)\n self.masked_token_decoder_bias = nn.Parameter(torch.zeros(28996))\n\n self.position_embedding = nn.Embedding(7, self.position_embedding_dims)\n self.position_embedding.weight.data.normal_(mean=0.0, std=0.02)\n self.attention_encoder = PosAwareAttEncoder(self.ent_output_size,\n self.bert_encoder.get_output_dims(),\n 2 * self.position_embedding_dims,\n self.att_size,\n activation=self.activation,\n dropout=cfg.dropout)\n\n self.mlp_head1 = BertLinear(self.ent_output_size,\n self.bert_encoder.get_output_dims(),\n activation=self.activation,\n dropout=cfg.dropout)\n self.mlp_head2 = BertLinear(self.bert_encoder.get_output_dims(),\n self.bert_encoder.get_output_dims(),\n activation=self.activation,\n dropout=cfg.dropout)\n\n self.masked_token_loss = nn.CrossEntropyLoss()", "def __init__(self, h, d_model, leaky_relu_slope=0.1, dropout=0.1, attenuation_lambda=0.1, distance_matrix_kernel='softmax'):\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n self.d_k = d_model // h # We assume d_v always equals d_k\n self.h = h\n\n self.attenuation_lambda = torch.nn.Parameter(torch.tensor(attenuation_lambda, requires_grad=True))\n\n self.linears = clones(nn.Linear(d_model, d_model), 5) # 5 for query, key, value, node update, edge update\n\n self.message = None\n self.leaky_relu_slope = leaky_relu_slope\n self.dropout = nn.Dropout(p=dropout)\n\n if distance_matrix_kernel == 'softmax':\n self.distance_matrix_kernel = lambda x: F.softmax(-x, dim=-1)\n elif distance_matrix_kernel == 'exp':\n self.distance_matrix_kernel = lambda x: torch.exp(-x)", "def attack_batch(self, img, label_1hot):\n def is_confidently_fooled(x, true_label):\n if not isinstance(x, (float, int, np.int64)) and not (isinstance(x, 
torch.Tensor) and x.numel() == 1):\n z = torch.clone(x)\n if self.TARGETED:\n z[true_label] -= self.CONFIDENCE\n else:\n z[true_label] += self.CONFIDENCE\n z = torch.argmax(z)\n else:\n z = x\n\n if self.TARGETED:\n return z == true_label\n else:\n return z != true_label\n\n # convert img to float32 to avoid numba error\n img = img.type(torch.float32)\n\n if torch.argmax(model(img+0.5)) != torch.argmax(label_1hot):\n print(\"Image is already misclassified.\")\n return img, 0.0\n\n # remove the extra batch dimension\n if len(img.shape) == 4:\n img = img[0]\n if len(label_1hot.shape) == 2:\n label_1hot = label_1hot[0]\n # convert to tanh-space\n if self.use_tanh:\n img = torch.arctanh(img*1.999999)\n\n # set the lower and upper bounds accordingly\n c_lower_bound = 0.0\n c = self.initial_c\n c_upper_bound = 1e10\n\n # set the upper and lower bounds for the modifier\n if not self.use_tanh:\n self.modifier_up = 0.5 - img.reshape(-1)\n self.modifier_down = -0.5 - img.reshape(-1)\n\n # clear the modifier\n # if not self.load_checkpoint:\n # if self.use_resize:\n # self.resize_img(self.resize_init_size,\n # self.resize_init_size, True)\n # else:\n # self.real_modifier = torch.zeros(\n # (1,) + (self.num_channels, self.small_x, self.small_y), dtype=torch.float32, device=self.device)\n # if self.solver_name == \"fake_zero\":\n # self.real_modifier.requires_grad = True\n\n # the best l2, score, and image attack\n outer_best_c = c\n outer_best_l2 = 1e10\n outer_best_score = -1\n if self.use_tanh:\n outer_best_adv = torch.tanh(img)/2\n else:\n outer_best_adv = img\n\n for outer_step in range(self.BINARY_SEARCH_STEPS):\n print(outer_best_l2)\n\n best_l2 = 1e10\n best_score = -1\n\n # The last iteration (if we run many steps) repeat the search once.\n if self.repeat == True and outer_step == self.BINARY_SEARCH_STEPS-1:\n c = c_upper_bound\n\n # set the variables so that we don't have to send them over again\n # self.setup = []\n self.true_img = img.detach().clone()\n self.true_label_1hot = label_1hot.detach().clone()\n self.c = c\n # self.setup = [self.true_img, self.true_label_1hot, self.c]\n\n # use the current best model\n # np.copyto(self.real_modifier, outer_best_adv - img)\n # use the model left by last constant change\n\n prev_loss = 1e6\n train_timer = 0.0\n last_loss1 = 1.0\n if not self.load_checkpoint:\n if self.use_resize:\n self.resize_img(self.resize_init_size,\n self.resize_init_size, True)\n else:\n self.real_modifier = torch.zeros(\n (1,) + (self.num_channels, self.small_x, self.small_y), dtype=torch.float32, device=self.device)\n if self.solver_name == \"fake_zero\":\n self.real_modifier.requires_grad = True\n\n # reset ADAM status\n self.mt.fill_(0.0)\n self.vt.fill_(0.0)\n self.adam_epoch.fill_(1)\n self.stage = 0\n multiplier = 1\n eval_costs = 0\n if self.solver_name != \"fake_zero\":\n multiplier = 24\n for iteration in range(self.start_iter, self.MAX_ITERATIONS):\n if self.use_resize:\n if iteration == 2000:\n # if iteration == 2000 // 24:\n self.resize_img(64, 64)\n if iteration == 10000:\n # if iteration == 2000 // 24 + (10000 - 2000) // 96:\n self.resize_img(128, 128)\n # if iteration == 200*30:\n # if iteration == 250 * multiplier:\n # self.resize_img(256,256)\n # print out the losses every 10%\n if iteration % (self.print_every) == 0:\n # print(iteration,self.sess.run((self.total_loss,self.real,self.other,self.loss1,self.loss2), feed_dict={self.modifier: self.real_modifier}))\n\n self.compute_loss(self.real_modifier)\n\n total_loss, real, other, loss1, loss2 = 
self.total_loss, self.real, self.other, self.loss1, self.loss2\n print(\"[STATS][L2] iter = {}, cost = {}, time = {:.3f}, size = {}, loss = {:.5g}, real = {:.5g}, other = {:.5g}, loss1 = {:.5g}, loss2 = {:.5g}\".format(\n iteration, eval_costs, train_timer, self.real_modifier.shape, total_loss[0], real[0], other[0], loss1[0], loss2[0]))\n sys.stdout.flush()\n # np.save('black_iter_{}'.format(iteration), self.real_modifier)\n\n attack_begin_time = time.time()\n # perform the attack\n if self.solver_name == \"fake_zero\":\n total_loss, l2, loss1, loss2, score, nimg = self.fake_blackbox_optimizer()\n else:\n total_loss, l2, loss1, loss2, score, nimg = self.blackbox_optimizer(\n iteration)\n\n if self.solver_name == \"fake_zero\":\n eval_costs += self.real_modifier.numel()\n else:\n eval_costs += self.batch_size\n\n # reset ADAM states when a valid example has been found\n if loss1 == 0.0 and last_loss1 != 0.0 and self.stage == 0:\n # we have reached the fine tunning point\n # reset ADAM to avoid overshoot\n if self.reset_adam_after_found:\n self.mt.fill_(0.0)\n self.vt.fill_(0.0)\n self.adam_epoch.fill_(1)\n self.stage = 1\n last_loss1 = loss1\n\n # check if we should abort search if we're getting nowhere.\n # if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS//10) == 0:\n if self.ABORT_EARLY and iteration % self.early_stop_iters == 0:\n if total_loss > prev_loss*.9999:\n print(\"Early stopping because there is no improvement\")\n break\n prev_loss = total_loss\n\n # adjust the best result found so far\n # the best attack should have the target class with the largest value,\n # and has smallest l2 distance\n\n if l2 < best_l2 and is_confidently_fooled(score, torch.argmax(label_1hot)):\n best_l2 = l2\n best_score = torch.argmax(score)\n if l2 < outer_best_l2 and is_confidently_fooled(score, torch.argmax(label_1hot)):\n # print a message if it is the first attack found\n if outer_best_l2 == 1e10:\n print(\"[STATS][L3](First valid attack found!) 
iter = {}, cost = {}, time = {:.3f}, size = {}, loss = {:.5g}, loss1 = {:.5g}, loss2 = {:.5g}, l2 = {:.5g}\".format(\n iteration, eval_costs, train_timer, self.real_modifier.shape, total_loss, loss1, loss2, l2))\n sys.stdout.flush()\n outer_best_l2 = l2\n outer_best_score = torch.argmax(score)\n outer_best_adv = nimg\n outer_best_c = c\n\n train_timer += time.time() - attack_begin_time\n\n # adjust the constant as needed\n\n if is_confidently_fooled(best_score, torch.argmax(label_1hot)) and best_score != -1:\n # success, divide const by two\n print('old c: ', c)\n c_upper_bound = min(c_upper_bound, c)\n if c_upper_bound < 1e9:\n c = (c_lower_bound + c_upper_bound)/2\n print('new c: ', c)\n else:\n # failure, either multiply by 10 if no solution found yet\n # or do binary search with the known upper bound\n print('old c: ', c)\n c_lower_bound = max(c_lower_bound, c)\n if c_upper_bound < 1e9:\n c = (c_lower_bound + c_upper_bound)/2\n else:\n c *= 10\n print('new c: ', c)\n\n if self.use_tanh:\n img = torch.tanh(img)/2\n\n # return the best solution found\n return outer_best_adv, outer_best_c", "def sample_model(model, x, y, params_init, model_loss='multi_class_linear_output' ,num_samples=10, num_steps_per_sample=10, step_size=0.1, burn=0, inv_mass=None, jitter=None, normalizing_const=1., softabs_const=None, explicit_binding_const=100, fixed_point_threshold=1e-5, fixed_point_max_iterations=1000, jitter_max_tries=10, sampler=Sampler.HMC, integrator=Integrator.IMPLICIT, metric=Metric.HESSIAN, debug=False, tau_out=1.,tau_list=None, store_on_GPU = True, desired_accept_rate=0.8, verbose = False):\n\n device = params_init.device\n params_shape_list = []\n params_flattened_list = []\n build_tau = False\n if tau_list is None:\n tau_list = []\n build_tau = True\n for weights in model.parameters():\n params_shape_list.append(weights.shape)\n params_flattened_list.append(weights.nelement())\n if build_tau:\n tau_list.append(torch.tensor(1.))\n\n log_prob_func = define_model_log_prob(model, model_loss, x, y, params_flattened_list, params_shape_list, tau_list, tau_out, normalizing_const=normalizing_const, device = device)\n\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n return sample(log_prob_func, params_init, num_samples=num_samples, num_steps_per_sample=num_steps_per_sample, step_size=step_size, burn=burn, jitter=jitter, inv_mass=inv_mass, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, fixed_point_threshold=fixed_point_threshold, fixed_point_max_iterations=fixed_point_max_iterations, jitter_max_tries=jitter_max_tries, sampler=sampler, integrator=integrator, metric=metric, debug=debug, desired_accept_rate=desired_accept_rate, store_on_GPU = store_on_GPU, verbose = verbose)", "def __init__(self, h, d_model, dropout=0.1):\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)", "def forward(self, images, text=None, mask=None):\n b, c, fh, fw = images.shape\n images = self.patch_embedding(images) # b,d,gh,gw\n image_patchified = images.flatten(2).transpose(1, 2) # b,gh*gw,d\n #image_patchified = einops.rearrange(x, 'b d gh gw -> b (gh gw) d')\n \n if hasattr(self, 'class_token'):\n x = torch.cat((self.class_token.expand(b, -1, -1), image_patchified), dim=1) # b,gh*gw+1,d\n if hasattr(self, 'positional_embedding'): \n x = 
self.positional_embedding(x) # b,gh*gw+1,d\n\n # concatenate text to images\n if hasattr(self, 'text_embeddings'):\n text = self.text_embeddings(text) #b, max_text_seq_len > b, max_text_seq_len, d\n x = torch.cat((x, text), dim=1) #b, gh*gw+1+max_text_seq_len,d\n \n if self.ret_interm_repr and self.ret_attn_scores:\n x, interm_repr, scores = self.transformer(x, mask)\n elif self.ret_interm_repr:\n x, interm_repr = self.transformer(x, mask)\n elif self.ret_attn_scores:\n x, scores = self.transformer(x, mask) # b,gh*gw+1,d\n else:\n x = self.transformer(x, mask)\n \n if hasattr(self, 'pre_logits'):\n x = self.pre_logits(x) # b,d\n x = torch.tanh(x) # b,d\n \n if hasattr(self, 'fc'):\n x = self.norm(x)[:, 0] # b,d\n x = self.fc(x) # b,num_classes\n \n if self.ret_image_patchified and self.ret_interm_repr and self.ret_attn_scores:\n return x, interm_repr, scores, image_patchified\n \n elif self.ret_interm_repr and self.ret_attn_scores:\n return x, interm_repr, scores\n elif self.ret_interm_repr and self.ret_image_patchified:\n return x, interm_repr, image_patchified\n elif self.ret_image_patchified and self.ret_attn_scores:\n return x, scores, image_patchified\n \n elif self.ret_interm_repr:\n return x, interm_repr\n elif self.ret_image_patchified:\n return x, image_patchified\n elif self.ret_attn_scores:\n return x, scores\n \n else:\n return x", "def __init__(self, num_nodes, nfeat, nhid, relation_dim, dropout, alpha, nheads):\n\t\tsuper(SpGAT, self).__init__()\n\n\t\tself.dropout\t\t= dropout\n\t\tself.dropout_layer\t= nn.Dropout(self.dropout)\n\t\tself.attentions\t\t= [SpGraphAttentionLayer(num_nodes, nfeat, nhid, relation_dim, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]\n\n\t\tfor i, attention in enumerate(self.attentions):\n\t\t\tself.add_module('attention_{}'.format(i), attention)\n\n\t\t# W matrix to convert h_input to h_output dimension\n\t\tself.W = nn.Parameter(torch.zeros(size=(relation_dim, nheads * nhid)))\n\t\tnn.init.xavier_uniform_(self.W.data, gain=1.414)\n\n\t\tself.out_att = SpGraphAttentionLayer(num_nodes, nhid * nheads, nheads * nhid, nheads * nhid, dropout=dropout, alpha=alpha, concat=False )", "def t1_hypointensity( x, xsegmentation, xWMProbability, template, templateWMPrior, wmh_thresh=0.1 ):\n mybig = [88,128,128]\n templatesmall = ants.resample_image( template, mybig, use_voxels=True )\n qaff = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(templatesmall), 'SyN',\n syn_sampling=2,\n syn_metric='CC',\n reg_iterations = [25,15,0,0],\n aff_metric='GC', random_seed=1 )\n afftx = qaff['fwdtransforms'][1]\n templateWMPrior2x = ants.apply_transforms( x, templateWMPrior, qaff['fwdtransforms'] )\n cerebrum = ants.threshold_image( xsegmentation, 2, 4 )\n realWM = ants.threshold_image( templateWMPrior2x , 0.1, math.inf )\n inimg = ants.rank_intensity( x )\n parcellateWMdnz = ants.kmeans_segmentation( inimg, 2, realWM, mrf=0.3 )['probabilityimages'][0]\n x2template = ants.apply_transforms( templatesmall, x, afftx, whichtoinvert=[True] )\n parcellateWMdnz2template = ants.apply_transforms( templatesmall,\n cerebrum * parcellateWMdnz, afftx, whichtoinvert=[True] )\n # features = rank+dnz-image, lprob, wprob, wprior at mybig resolution\n f1 = x2template.numpy()\n f2 = parcellateWMdnz2template.numpy()\n f3 = ants.apply_transforms( templatesmall, xWMProbability, afftx, whichtoinvert=[True] ).numpy()\n f4 = ants.apply_transforms( templatesmall, templateWMPrior, qaff['fwdtransforms'][0] ).numpy()\n myfeatures = np.stack( (f1,f2,f3,f4), axis=3 
)\n newshape = np.concatenate( [ [1],np.asarray( myfeatures.shape )] )\n myfeatures = myfeatures.reshape( newshape )\n\n inshape = [None,None,None,4]\n wmhunet = antspynet.create_unet_model_3d( inshape,\n number_of_outputs = 1,\n number_of_layers = 4,\n mode = 'sigmoid' )\n\n wmhunet.load_weights( get_data(\"simwmhseg\", target_extension='.h5') )\n\n pp = wmhunet.predict( myfeatures )\n\n limg = ants.from_numpy( tf.squeeze( pp[0] ).numpy( ) )\n limg = ants.copy_image_info( templatesmall, limg )\n lesresam = ants.apply_transforms( x, limg, afftx, whichtoinvert=[False] )\n # lesresam = lesresam * cerebrum\n rnmdl = antspynet.create_resnet_model_3d( inshape,\n number_of_classification_labels = 1,\n layers = (1,2,3),\n residual_block_schedule = (3,4,6,3), squeeze_and_excite = True,\n lowest_resolution = 32, cardinality = 1, mode = \"regression\" )\n rnmdl.load_weights( get_data(\"simwmdisc\", target_extension='.h5' ) )\n qq = rnmdl.predict( myfeatures )\n\n lesresamb = ants.threshold_image( lesresam, wmh_thresh, 1.0 )\n lgo=ants.label_geometry_measures( lesresamb, lesresam )\n wmhsummary = pd.read_csv( get_data(\"wmh_evidence\", target_extension='.csv' ) )\n wmhsummary.at[0,'Value']=lgo.at[0,'VolumeInMillimeters']\n wmhsummary.at[1,'Value']=lgo.at[0,'IntegratedIntensity']\n wmhsummary.at[2,'Value']=float(qq)\n\n return {\n \"wmh_summary\":wmhsummary,\n \"wmh_probability_image\":lesresam,\n \"wmh_evidence_of_existence\":float(qq),\n \"wmh_max_prob\":lesresam.max(),\n \"features\":myfeatures }", "def __init__( self, weights, topics ):\n\n # Number of topics and dictionary size\n self.W, self.K = topics.shape\n assert( self.W > self.K )\n\n self.topics = topics\n MixtureModel.__init__(self, weights, topics)", "def __init__(self, n_components=5, user=\"user_0\", model=\"gaussian\"):\n self.user = user\n self.n_components = n_components\n\n if(model==\"gaussian\"):\n self.model = hmm.GaussianHMM(n_components=n_components, covariance_type=\"diag\", \\\n init_params=\"cm\", params=\"cmt\")\n elif(model==\"GMMHMM\"):\n self.model = hmm.GMMHMM(n_components=n_components, n_mix=3, covariance_type=\"diag\", \\\n init_params=\"cm\", params=\"cmt\")\n self.model.gmms_ = [sklearn.mixture.GaussianMixture()]*3\n\n self.model.startprob_ = np.concatenate(([1],np.zeros(n_components-1)))\n self.model.transmat_ = self.compute_trans_matrix( n_components )\n\n self.overall_accuracy = 0", "def generate_init_samples(self, im: torch.Tensor) -> TensorList:\n\n # Compute augmentation size\n aug_expansion_factor = getattr(self.params, 'augmentation_expansion_factor', None)\n aug_expansion_sz = self.img_sample_sz.clone()\n aug_output_sz = None\n if aug_expansion_factor is not None and aug_expansion_factor != 1:\n aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()\n aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2\n aug_expansion_sz = aug_expansion_sz.float()\n aug_output_sz = self.img_sample_sz.long().tolist()\n\n # Random shift operator\n get_rand_shift = lambda: None\n random_shift_factor = getattr(self.params, 'random_shift_factor', 0)\n if random_shift_factor > 0:\n get_rand_shift = lambda: ((torch.rand(2) - 0.5) * self.img_sample_sz * random_shift_factor).long().tolist()\n\n # Create transofmations\n self.transforms = [augmentation.Identity(aug_output_sz)]\n if 'shift' in self.params.augmentation:\n self.transforms.extend([augmentation.Translation(shift, aug_output_sz) for shift in self.params.augmentation['shift']])\n if 'relativeshift' in self.params.augmentation:\n 
get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist()\n self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz) for shift in self.params.augmentation['relativeshift']])\n if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']:\n self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))\n if 'blur' in self.params.augmentation:\n self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in self.params.augmentation['blur']])\n if 'scale' in self.params.augmentation:\n self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in self.params.augmentation['scale']])\n if 'rotate' in self.params.augmentation:\n self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift()) for angle in self.params.augmentation['rotate']])\n\n # Generate initial samples\n init_samples = self.params.features.extract_transformed(im, self.pos, self.target_scale, aug_expansion_sz, self.transforms)\n\n # Remove augmented samples for those that shall not have\n for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n if not use_aug:\n init_samples[i] = init_samples[i][0:1, ...]\n\n # Add dropout samples\n if 'dropout' in self.params.augmentation:\n num, prob = self.params.augmentation['dropout']\n self.transforms.extend(self.transforms[:1]*num)\n for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n if use_aug:\n init_samples[i] = torch.cat([init_samples[i], F.dropout2d(init_samples[i][0:1,...].expand(num,-1,-1,-1), p=prob, training=True)])\n\n return init_samples", "def create_model(nb_feats=25,emat=embedding_matrix):\n VOCAB = len(word2ix)\n EMBED_HIDDEN_SIZE = 300\n MAX_LEN = 35\n MAX_CHARLEN = 5\n SENT_HIDDEN_SIZE = 100\n ACTIVATION = 'elu'\n RNN_HIDDEN_SIZE = 50\n DP = 0.25\n L2 = 4e-6\n \n embed_word = Embedding(VOCAB, EMBED_HIDDEN_SIZE, weights=[emat], input_length=MAX_LEN, trainable=False)\n embed_code = Embedding(len(code2Idx), len(code2Idx), input_length=MAX_LEN, trainable=True)\n translate = TimeDistributed(Dense(units=SENT_HIDDEN_SIZE, activation=ACTIVATION))\n encode = Bidirectional(recurrent.LSTM(units=RNN_HIDDEN_SIZE, return_sequences=False, kernel_initializer='glorot_uniform', dropout=DP, recurrent_dropout=DP), name='my_lstm')\n\n # input defined: 8 tensors\n seq_title = Input(shape=(MAX_LEN,), dtype='int32') # title\n seq_title_code = Input(shape=(MAX_LEN,), dtype='int32')\n seq_title_char = Input(shape=(MAX_LEN,MAX_CHARLEN), dtype='int32')\n seq_cat= Input(shape=(MAX_LEN,), dtype='int32') # joint cats\n seq_cat_code = Input(shape=(MAX_LEN,), dtype='int32')\n seq_cat_char = Input(shape=(MAX_LEN,MAX_CHARLEN), dtype='int32')\n dense_input = Input(shape=(nb_feats,), dtype='float32')\n \n # char\n charem_full = create_charem()\n \n # rnn encode\n seq = embed_word(seq_title)\n seq = Dropout(DP)(seq)\n seq = translate(seq)\n code = embed_code(seq_title_code)\n char = charem_full(seq_title_char)\n seq = concatenate([seq,code,char])\n seq = encode(seq)\n \n seq3 = embed_word(seq_cat)\n seq3 = Dropout(DP)(seq3)\n seq3 = translate(seq3)\n code3 = embed_code(seq_cat_code)\n char3 = charem_full(seq_cat_char)\n seq3 = concatenate([seq3,code3,char3])\n seq3 = encode(seq3)\n \n # dense\n den = BatchNormalization()(dense_input)\n den = Dense(100, activation=ACTIVATION)(den)\n den = Dropout(DP)(den)\n\n #joint1: LOGLOSS vs RMSE\n joint = 
concatenate([seq,seq3,den])\n joint = Dense(units=150, activation=ACTIVATION, kernel_regularizer=l2(L2) if L2 else None, kernel_initializer='he_normal')(joint)\n joint = PReLU()(joint)\n joint = Dropout(DP)(joint)\n joint = BatchNormalization()(joint)\n \n joint = maximum([Dense(units=100, activation=ACTIVATION, kernel_regularizer=l2(L2) if L2 else None, kernel_initializer='he_normal')(joint) for _ in range(5)])\n joint = PReLU()(joint)\n joint = Dropout(DP)(joint)\n joint = BatchNormalization()(joint)\n\n score1 = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(L2) if L2 else None, kernel_initializer='he_normal',name='logloss')(joint)\n score2 = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(L2) if L2 else None, kernel_initializer='he_normal',name='mse')(joint)\n\n # plug all in one\n model2 = Model(inputs=[seq_title,seq_title_code,seq_title_char,seq_cat,seq_cat_code,seq_cat_char,dense_input], outputs=[score1,score2])\n model2.compile(optimizer='nadam', loss={'logloss': 'binary_crossentropy', 'mse': 'mean_squared_error'}, \\\n loss_weights={'logloss': 0.5, 'mse': 0.5},\n metrics=[rmse_keras])\n return model2", "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n for param in self.shared_features.parameters():\n param.requires_grad = False", "def forward(self,\n img,\n x,\n init_states=None):\n img_feature = self.cnn(img)\n #x = self.linear_in(x)\n x = self.embedding(x)\n #print(x.shape)\n bs, seq_sz, _ = x.size()\n hidden_seq = []\n \n if init_states is None:\n h_t, c_t = (\n torch.zeros(bs, self.hidden_size).to(x.device),\n torch.zeros(bs, self.hidden_size).to(x.device),\n )\n else:\n h_t, c_t = init_states\n\n x_t = img_feature.reshape(bs, self.hidden_size)\n i_t = torch.sigmoid(img_feature + h_t @ self.V_i + self.b_i)\n f_t = torch.sigmoid(img_feature + h_t @ self.V_f + self.b_f)\n g_t = torch.tanh(img_feature + h_t @ self.V_c + self.b_c)\n o_t = torch.sigmoid(img_feature + h_t @ self.V_o + self.b_o)\n c_t = f_t * c_t + i_t * g_t\n 
h_t = o_t * torch.tanh(c_t) \n # hidden_seq.append(h_t.unsqueeze(0)) \n\n for t in range(seq_sz):\n x_t = x[:, t, :] # 4*512\n # print(x_t.shape)\n #x_t = self.bn1(x_t)\n i_t = torch.sigmoid(x_t @ self.U_i + h_t @ self.V_i + self.b_i)\n f_t = torch.sigmoid(x_t @ self.U_f + h_t @ self.V_f + self.b_f)\n g_t = torch.tanh(x_t @ self.U_c + h_t @ self.V_c + self.b_c)\n o_t = torch.sigmoid(x_t @ self.U_o + h_t @ self.V_o + self.b_o)\n c_t = f_t * c_t + i_t * g_t\n h_t = o_t * torch.tanh(c_t)# 4* 512\n\n hidden_seq.append(h_t.unsqueeze(0))\n \n #reshape hidden_seq p/ retornar\n hidden_seq = torch.cat(hidden_seq, dim=0)\n hidden_seq = hidden_seq.transpose(0, 1).contiguous()\n hidden_seq = self.linear_out(hidden_seq)\n\n seq_pred = self.softmax(hidden_seq)\n return seq_pred, (h_t, c_t)", "def __init__(self, args, number_of_labels, number_of_features,adj):\n super(SpGAT, self).__init__()\n self.args=args\n \n self.number_of_labels = number_of_labels\n self.number_of_features = number_of_features\n self.device = args.device\n self.adj= sparse_mx_to_torch_sparse_tensor(adj).to(self.device).to_dense()\n self.attentions = [SpGraphAttentionLayer(number_of_features, \n args.hidden, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=True) for _ in range(args.nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(args.hidden * args.nheads, \n args.Q, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=False)", "def __init__(self, **kwargs):\n super(BertNewsClassifier, self).__init__()\n self.PRE_TRAINED_MODEL_NAME = \"bert-base-uncased\"\n self.bert_model = BertModel.from_pretrained(self.PRE_TRAINED_MODEL_NAME)\n for param in self.bert_model.parameters():\n param.requires_grad = False\n self.drop = nn.Dropout(p=0.2)\n # assigning labels\n self.class_names = [\"World\", \"Sports\", \"Business\", \"Sci/Tech\"]\n n_classes = len(self.class_names)\n\n self.fc1 = nn.Linear(self.bert_model.config.hidden_size, 512)\n self.out = nn.Linear(512, n_classes)\n self.bert_model.embedding = self.bert_model.embeddings\n self.embedding = self.bert_model.embeddings\n\n self.scheduler = None\n self.optimizer = None\n self.args = kwargs\n\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()", "def __init__(self, data, noise_model, extra_data=None):\r\n self.data = data\r\n self.noise_model = noise_model\r\n self.extra_data = extra_data\r\n\r\n #Inital values\r\n self.N, self.D = self.data.shape\r\n self.is_heteroscedastic = True\r\n self.Nparams = 0\r\n self.NORMAL_CONST = ((0.5 * self.N) * np.log(2 * np.pi))\r\n\r\n self.restart()\r\n likelihood.__init__(self)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)" ]
[ "0.65408075", "0.64289623", "0.6141969", "0.6130033", "0.61221105", "0.58673143", "0.586286", "0.5794737", "0.575147", "0.573981", "0.5708623", "0.5706052", "0.57047206", "0.56855595", "0.5660355", "0.5646968", "0.55968773", "0.5591234", "0.55796915", "0.5566003", "0.55493313", "0.55406255", "0.5520946", "0.55187035", "0.551376", "0.55099267", "0.5502489", "0.5502489", "0.5480334", "0.5477024", "0.546603", "0.5462906", "0.5459233", "0.5447922", "0.54402465", "0.5434279", "0.54112816", "0.54085284", "0.53984964", "0.53947175", "0.53944224", "0.53846717", "0.53761476", "0.5365963", "0.5349691", "0.5346873", "0.5334856", "0.53202116", "0.5319707", "0.5313476", "0.53113174", "0.52976984", "0.52976245", "0.5294226", "0.5287988", "0.52877337", "0.528641", "0.52778727", "0.5258844", "0.52559364", "0.5253899", "0.5248349", "0.52438635", "0.5234735", "0.5232273", "0.5224062", "0.5211356", "0.52052414", "0.52033603", "0.52015173", "0.51971734", "0.51869553", "0.5186751", "0.5186462", "0.5183761", "0.5179092", "0.5178704", "0.5165697", "0.5164151", "0.51574284", "0.51571864", "0.51568544", "0.5156117", "0.515192", "0.51485765", "0.5146286", "0.51312006", "0.51296437", "0.51236415", "0.5121304", "0.5121276", "0.5119742", "0.5119055", "0.51172924", "0.51144737", "0.5113489", "0.5108281", "0.51082647", "0.5105846", "0.51045716", "0.5102823" ]
0.0
-1
Entry point to gameplay.
def main() -> None:
    game = advanced_game(MAP_FILE)

    root = tk.Tk()
    root.title('EndOfDayz')
    if TASK == 1:
        gui = BasicGraphicalInterface
    elif TASK == 2:
        gui = ImageGraphicalInterface
    # else:
    #     gui = MastersGraphicalInterface
    app = gui(root, game.get_grid().get_size())
    app.play(game)
    root.mainloop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play_game():\n pass", "def play_game():\n pass", "def game_play(self):", "def play(self):\n print('Playing game...')", "def start_game(self):\n\n\t\tpass", "def main():\n g = Game(800, 600)\n g.start()", "def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()", "def main():\n play_game(progression)", "async def game(self):\n pass", "def Gameloop():", "def main():\n game = Blackjack()\n game.play()", "def start_game(self) -> None:\n self.init_game()\n self.play()", "def play_game():\n\n _initial_deal()\n\n main_window.mainloop()", "def main(self):\n _age = info.getInfo(self)\n _flag = game.check_age(self, _age)\n if _flag == False:\n exit()\n game.wants_to_play(0)", "def run(self, GameState):\n pass", "def run_game_logic(self):\n pass", "def setup_game(self):", "def main():\n g = DemoGame(800, 600)\n g.start()", "def main():\n game = RiichiMahjongApp()\n game.run()", "def game_loop(self):\n self.interface.game_loop(self)", "def play(self):\n pass", "def main():\n game = TinkerGame()\n game.setup()\n while game.calculate_points() > 0 and not game.game_over:\n game.play()\n game.end()", "def oneGame():\n playOneGame()", "def run(self):\n self.soundtrack.play(-1, 0, 2000)\n pygame.time.set_timer(Game.REFRESH_EVENT, 1000 // Game.FPS)\n\n while 1 < 2:\n event = pygame.event.wait()\n\n # Android-specific: always be ready to sleep\n if android:\n if android.check_pause():\n android.wait_for_resume()\n\n # Refresh display\n if event.type == Game.REFRESH_EVENT:\n # Android-specific: keep the soundtrack playing\n if android:\n android.mixer.periodic()\n\n self.draw()\n self.physics()\n pygame.display.flip()\n\n # The announcement is over---start playing\n elif event.type == Game.ANNOUNCE_EVENT:\n pygame.time.set_timer(Game.ANNOUNCE_EVENT, 0)\n self.state = Game.PLAY_STATE\n\n # The congratulations is over---announce new target\n elif event.type == Game.BRAVO_EVENT:\n pygame.time.set_timer(Game.BRAVO_EVENT, 0)\n self.announce_target()\n\n # The user clicked somewhere\n elif event.type == pygame.MOUSEBUTTONDOWN \\\n and self.state != Game.BRAVO_STATE:\n self.clicked(event.pos)\n\n # The user hit escape (or back); quit\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n break", "def main():\n game = Game(TIMES, HARDNESS)\n game.start()\n game.print_score()", "def start_game(self):\n print(\"hi there, game started!\")\n self.draw()", "def __init__(self):\n self.gameloop()", "def GAME_LOOP():\n pass", "def main():\n \n # load_and_initialize_func()\n\n loop_and_update_forever()\n\n pygame.quit()", "def main():\n field = Field(10, 10)\n snake = Snake((0, 0))\n game = Game(field, snake)\n game.start()", "def test_runGame(self):\n # this is tested by playing the game. 
No good way to unit test this.\n pass", "def start_of_game(self):\n pass", "def main():\n game = Hangman()\n game.play_hangman()", "def main():\n display, clock = game.init_pygame()\n highscores = HighScores(display, clock)\n highscores.run()", "def run(self) -> None:\n pg.mixer.init()\n pg.mixer.music.load(path.join(sound_dir, 'theme.wav'))\n pg.mixer.music.set_volume(0.1)\n pg.mixer.music.play(-1, fade_ms=1000)\n while True:\n if self.state == State.MENU:\n self.show_menu()\n if self.state == State.GAME_OVER:\n self.show_game_over_screen()\n if self.state == State.PLAY:\n self.run_game()", "def main():\n secret_word = get_word()\n play_game(secret_word)", "def run(self):\n\n machine = StateMachine()\n\n while machine.state != GameState.END:\n\n if machine.state == GameState.PLAYING:\n machine.state = self.play(self.screen, self)\n elif machine.state == GameState.INTRO:\n machine.state = Intro.run(self.screen)\n elif machine.state == GameState.MENU:\n machine.state = Menu.run(self.screen)\n else:\n raise EnvironmentError\n\n pygame.quit()", "def execute(self):\n self.init()\n\n while self.running:\n self.render()\n self.events()\n\n pygame.quit()", "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "def play(self):\n self.strategy(self)", "def play(self):\r\n self.perform_strategy()", "def main():\n pygame.init()\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pygame.display.set_caption('8-Puzzle game')\n screen = pygame.display.set_mode((800, 500))\n fpsclock = pygame.time.Clock()\n program = SlidePuzzle((3, 3), 160, 5, difficulty=10) # program is also the gym environment\n\n choice = program.selectPlayerMenu(fpsclock, screen)\n if choice == \"AI\":\n pygame.display.quit()\n trainAI(program)\n elif choice == \"human\":\n launchWithGUI(program, fpsclock, screen)\n del program", "def run_application():\n show_theme_message()\n keep_playing = 'y'\n health_meter = {}\n reset_health_meter(health_meter)\n show_game_mission()\n\n while keep_playing == 'y':\n reset_health_meter(health_meter)\n play_game(health_meter)\n keep_playing = input(\"\\nPlay again? 
Yes(y)/No(n): \")", "def game_tick_run(self):\n pass", "def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)", "def run_gui_game():\n # Set up game\n view = GuiView()\n game = GameEngine(view)", "def main():\r\n\r\n #set the display, caption, and timer\r\n pygame.init()\r\n mainClock = pygame.time.Clock()\r\n windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)\r\n pygame.display.set_caption(\"Cat's Big Adventure\")\r\n\r\n #Display a menu, choose a level and instantiate a game\r\n display_menu(windowSurface)\r\n\r\n #initialize the game\r\n stats = [6]\r\n game = Game(stats)\r\n \r\n # run the game loop until the user quits\r\n while True:\r\n # Process events (keystrokes, mouse clicks, etc)\r\n game.process_events(windowSurface)\r\n\r\n # Update object positions, check for collisions\r\n game.run_logic()\r\n \r\n # Draw the current frame\r\n game.display_frame(windowSurface)\r\n\r\n #draw background image\r\n background_image = pygame.image.load(\"sky.png\").convert()\r\n windowSurface.blit(background_image, [0, 0])\r\n \r\n mainClock.tick(FRAMERATE)", "def play_game():\n global pictures\n pictures = init()\n\n #run game at 30 frames per second\n FPS = 30\n FPSCLOCK = pygame.time.Clock()\n\n playing = True\n\n # Main game loop\n while (playing == True):\n playing = handle_key_events()\n render(pictures)\n FPSCLOCK.tick(FPS)\n\n pygame.quit()\n sys.exit()", "def play(self, game):\r\n\r\n raise NotImplementedError(\"Subclass must implement\")", "def play(self):\n\t\tprint(\"play args:\")\n\t\tprint(args)\n\t\tpyglet.clock.schedule_once( self.play_next,\n\t\t\t\t\t\t\t\t\tself._autonext_interval_msec)\n\t\t# instead of using interval schedules, it just callls the same\n\t\t# function repeated so if the system is backed up it won't create\n\t\t# additional problems\n\t\tself._playing = True", "def main():\n even_game()", "def main():\n\n name, game = select_game(vgc.KNOWN_GAMES)\n print('---- Launching: %s -----'%name)\n game.game.main()\n sys.exit(0)", "def run_game(self):\n while True:\n self._check_event()\n self._update_screen()", "def main():\n #Initialize pygame\n pygame.init()\n\n #Set up the display and draw it to screen\n display = DisplayUpdater()\n display.generate_display()\n\n #Set up the audio player\n sound = AudioPlayer()\n\n sound.play_menu_music()\n #Set up the controls\n controls = PlayerInput()\n\n #Start off in the main menu, can go to credits, leaderboard, or game\n display.show_main_menu()\n user_input = controls.get_menu_input(sound)\n #While the user hasn't quit from the main menu\n while user_input != INPUT.ESCAPE:\n #If the player hits ENTER, launch the game\n if user_input == INPUT.ENTER:\n play_demon_music = game(display, sound, controls)\n sound.play_menu_music(play_demon_music)\n controls.clear_menu_input()\n user_input = INPUT.SPACE\n\n #If the player hits SPACE, go to the leaderboard\n if user_input == INPUT.SPACE:\n display.show_leaderboard()\n user_input = controls.get_menu_input(sound)\n #If the player hits C, go to the credits\n if user_input == INPUT.C:\n display.show_credits()\n user_input = controls.get_menu_input(sound)\n\n #If the player hits ESC, return to the main menu.\n #must be in own if statement so we don't quit\n if user_input in (INPUT.ESCAPE, INPUT.SPACE, INPUT.C):\n display.show_main_menu()\n user_input = controls.get_menu_input(sound)", "def load_game(self):\n game = Game(self.w, 
self.h, self.screen)\n game.run()", "def run_game(self):\n while True:\n self._check_events()\n self.update_screen()", "def run():\n pygame.init()\n settings = Settings()\n screen = pygame.display.set_mode(\n (settings.screen_width, settings.screen_height))\n pygame.display.set_caption('Rocket')\n\n rocket = Rocket(settings, screen)\n\n # Main Loop\n while True:\n check_events(rocket)\n rocket.update()\n update_screen(settings, screen, rocket)", "def run(self):\n while True:\n if self.game_over: \n return \n\n self.handle_events() \n if self.paused:\n continue\n\n self.update_generation()\n self.draw_grid()\n\n self.cap_frame_rate()", "def start_game(self):\n self._puzzle.get_puzzle()\n self._do_outputs()\n\n while self._keep_playing:\n print(\"\")\n print(\"+-----+-----+-----\")\n print(\"\")\n self._get_inputs()\n self._do_updates()\n self._do_outputs()\n print(\"+-----+-----+-----\")", "def run(self):\n #game loop set self.playing to False to end game\n self.playing = True\n while self.playing:\n self.dt = self.clock.tick(FPS) / 1000\n self.events()\n self.update()\n self.draw()\n self.losing_sequence()", "def start(self):\n\n p = Parser()\n if self.event_status < 1:\n print(\"\\n\" * 100)\n self.game_intro()\n print(\"\\n\" * 100)\n\n playing = True\n while playing:\n self.check_upgrades()\n self.check_energy()\n self.check_event_status()\n cur_location = self.player.get_location()\n cur_location.print_description(self.event_status)\n cur_location.print_details(self.event_status)\n print_player_info(self.player)\n cur_location.set_visited(True)\n\n player_command = get_command()\n cmd_action, cmd_exit, cmd_direction, cmd_item, cmd_character = Parser.action_requested(player_command)\n\n print(\"\\n\" * 100)\n if cmd_action == GO:\n self.player.go_exit(self.event_status, direction=cmd_direction, exit_name=cmd_exit)\n\n elif cmd_action == TAKE:\n if cmd_item is None:\n print(\"You can't take that.\")\n else:\n self.player.take(cmd_item)\n\n elif cmd_action == DROP:\n if cmd_item is None:\n print(\"You can't drop that.\")\n else:\n self.player.drop(cmd_item)\n\n elif cmd_action == TALK:\n if cmd_character is None:\n print(\"You can't do talk to that.\")\n else:\n self.player.talk(cmd_character, self.event_status)\n\n elif cmd_action == LOOK:\n self.player.look(self.event_status)\n\n elif cmd_action == SAVEGAME:\n tmp_save_dir = input(\"Enter the save name\\n> \")\n if tmp_save_dir:\n save_dir = tmp_save_dir\n else:\n save_dir = None\n self.save(save_dir)\n\n elif cmd_action == QUIT:\n print(\"Exiting the game...\")\n return\n\n elif cmd_action == LOOK_AT:\n if cmd_item is None:\n print(\"You can't look at that.\")\n else:\n self.player.look_at(cmd_item)\n\n elif cmd_action == LISTEN:\n self.player.listen()\n\n elif cmd_action == PULL:\n if cmd_item is None:\n print(\"You can't pull that.\")\n else:\n self.pull(cmd_item)\n\n elif cmd_action == PUSH:\n if cmd_item is None:\n print(\"You can't push that.\")\n else:\n self.push(cmd_item)\n\n elif cmd_action == CHARGE:\n self.player.charge()\n\n elif cmd_action == USE:\n if cmd_item is None:\n print(\"You can't use that.\")\n else:\n self.use(cmd_item)\n\n elif cmd_action == WAIT:\n sleep_rate = 0.2\n print(\"You wait for a few moments...\")\n time.sleep(2)\n duration = time.time() + 5\n while time.time() < duration:\n print(\"\\n\" * 100)\n print(\"*\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"**\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"***\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n 
print(\"****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"*****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"***\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"**\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"*\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"Nothing happened...\")\n time.sleep(2)\n print(\"\\n\" * 100)\n\n elif cmd_action == HELP:\n self.help()\n # wait for user to finish reading\n input(\"Press 'enter' to continue.\")\n\n elif cmd_action == INVENTORY:\n self.player.print_inventory()\n\n elif cmd_action == LOADGAME:\n saved_games_dir = os.path.join(os.getcwd(), \"saved_games\")\n\n # Print Available Saved Games\n print(\"Enter the number of the game you want to load.\")\n saved_games = [game for game in os.listdir(saved_games_dir)]\n for index, sg in enumerate(saved_games):\n print(\"{0}. {1}\".format(index + 1, sg))\n\n # TODO error checking on user input\n user_game_selection = input(\">\")\n user_game = saved_games[int(user_game_selection) - 1]\n print(\"Loading game: {0}\".format(user_game))\n print(\"\\n\" * 100)\n self.load_game(os.path.join(saved_games_dir, user_game))\n else:\n print(\"Huh? That doesn't make any sense.\")", "def main():\n\n # Create logging file, rotate if filesize exceeds 1MB\n logger.add(\"logs/{time}.log\", rotation=\"1 MB\")\n\n GameContainer()\n logger.info(\"Started the game launcher. Make sure to support pygame!\")", "def main():\r\n # Initialize words from specific file\r\n words_list = hangman_helper.load_words()\r\n # Run single game with given word list to choose from\r\n run_single_game(words_list)\r\n # Ask the user if he would like to play again\r\n request = hangman_helper.get_input()\r\n if request[INPUT_TYPE] == hangman_helper.PLAY_AGAIN:\r\n if request[INPUT_VALUE]:\r\n run_single_game(words_list)", "def run_game():\n\n # Initialize pygame, settings, and screen object\n pygame.init()\n ai_settings = Settings()\n screen = pygame.display.set_mode((ai_settings.screen_width,\n ai_settings.screen_height))\n pygame.display.set_caption(\"Galaga\")\n\n # Make a Play button\n play_button = Button(screen, \"Play\")\n\n # Create an instance to store game statistics\n stats = GameStats(ai_settings)\n\n # Make a scoreboard\n scoreboard = Scoreboard(ai_settings, screen, stats)\n\n # Make a Ship, bullet group, alien group\n ship = Ship(ai_settings, screen)\n bullets = Group()\n aliens = Group()\n\n # Creating an enemy fleet of aliens\n gf.create_fleet(ai_settings, screen, ship, aliens)\n\n # Start the main loop for the game\n while True:\n # Checking for keyboard events \n gf.check_events(ai_settings, screen, stats, scoreboard,\n play_button, ship, aliens, bullets)\n\n if stats.game_active:\n # Update group objects\n ship.update()\n\n gf.update_bullets(ai_settings, screen, stats,\n scoreboard, ship, aliens, bullets)\n gf.update_aliens(ai_settings, screen, stats, scoreboard, ship, aliens, bullets)\n \n gf.update_screen(ai_settings, screen, stats,\n scoreboard, ship, aliens, bullets, play_button)", "def run_game(self) -> None:\n\n self.setup_play()\n pg.mixer.music.load(path.join(sound_dir, 'theme_full.wav'))\n pg.mixer.music.set_volume(0.3)\n pg.mixer.music.play(-1, fade_ms=1000)\n\n while self.state == State.PLAY:\n\n # Using clock.tick each loop ensures framerate is limited to target FPS\n self.dt = self.clock.tick(FPS)\n\n self.events()\n\n if self.game_state == InGameState.READY:\n # A pause before the 
game starts.\n\n state_text = \"READY!\"\n\n if self.display_timer is None:\n self.display_timer = 1\n\n elif self.display_timer == 0:\n self.display_timer = None\n self.game_state = InGameState.RUNNING\n\n elif self.game_state == InGameState.COMPLETE:\n # Player survived the time limit and moves n to next level.\n\n state_text = \"You survived!\"\n\n if self.display_timer is None:\n self.display_timer = 1\n\n elif self.display_timer == 0:\n self.display_timer = None\n self.setup_play(reset=True)\n self.game_state = InGameState.READY\n\n else:\n # Regular update step\n\n self.update()\n\n if self.display_timer is None:\n state_text = None\n elif self.display_timer == self.timer:\n self.display_timer = None\n\n if self.player.death_timer == 0:\n if self.lives == 0:\n self.state = State.GAME_OVER\n else:\n self.setup_play(reset=True)\n self.game_state = InGameState.READY\n\n if self.kill_bonus is None:\n\n if self.no_kills() >= self.target_no_kills:\n\n self.display_timer = self.timer - 2\n\n half_time = TIME_LIMIT // 2\n if self.timer >= half_time:\n self.kill_bonus = (self.timer - half_time) // 10 * ENEMY_CLEARANCE_BONUS\n state_text = f\"Kill bonus: {self.kill_bonus}\"\n else:\n self.kill_bonus = 0\n state_text = f\"Too slow - No kill bonus\"\n\n if self.timer == 0:\n self.game_state = InGameState.COMPLETE\n\n self.draw(state_text=state_text)", "def main():\r\n\r\n pygame.init()\r\n pygame.display.init()\r\n\r\n # Set the pygame clock\r\n clock = pygame.time.Clock()\r\n\r\n pygame.display.set_caption(\"Blackbox game\")\r\n current_game = BlackBoxGame()\r\n clock = pygame.time.Clock()\r\n\r\n while True:\r\n current_game.check_events()\r\n clock.tick(60)\r\n current_game.update_screen()\r\n\r\n pygame.quit()", "def game_loop(self):\n play_state, name, score = 'pre_game', '', None\n while self.playing:\n text = self.check_events()\n if self.START_KEY:\n if play_state == 'pre_game':\n play_state = 'game'\n elif play_state == 'post_game':\n self.playing = False\n self.score_board_obj.updade_scoreboard(score, name)\n\n if play_state == 'pre_game':\n self.display.fill(conf.BLACK)\n self.draw_text('PRESS START TO PLAY', 20, conf.SCREEN_WIDTH // 2, conf.SCREEN_HEIGHT // 2)\n self.window.blit(self.display, (0, 0))\n pygame.display.update()\n\n if play_state == 'game':\n self._log_debug('Started Snake game instance')\n score = SnakeGame.run()\n self._log_debug(f'Finished Snake game instance with score {score}')\n play_state = 'post_game'\n\n if play_state == 'post_game':\n if text:\n name += text\n\n self.display.fill(conf.BLACK)\n self.draw_text(f'Your score is: {score}', 50, conf.SCREEN_WIDTH // 2, conf.SCREEN_HEIGHT // 2)\n self.draw_text('Type Your name:', 20, conf.SCREEN_WIDTH // 2, conf.SCREEN_HEIGHT // 2 + 50)\n self.draw_text(f'{name}', 40, conf.SCREEN_WIDTH // 2, conf.SCREEN_HEIGHT // 2 + 100)\n self.window.blit(self.display, (0, 0))\n pygame.display.update()\n self.reset_keys()", "def run_game():\r\n pygame.init()\r\n ai_settings = Settings()\r\n screen = pygame.display.set_mode(\r\n (ai_settings.screen_width, ai_settings.screen_height))\r\n pygame.display.set_caption(\"Stars\")\r\n\r\n # Make a group of stars.\r\n stars = Group()\r\n\r\n # Create a star system\r\n gf.create_star_system(ai_settings, screen, stars)\r\n \r\n # Main game loop.\r\n while True:\r\n \r\n # Let's player quit the game.\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n gf.update_screen(ai_settings, screen, stars)", "def start(self):\n running = True\n 
while running:\n k=self.Game.playgame()\n if k=='Exit':\n running = False\n continue\n elif k=='resume':\n continue\n elif k=='GameOver':\n o=self.gameover()\n if o=='newgame':\n self.Game=Game(self.Display)\n else:\n running = False\n while k=='Won':\n o=self.won()\n if o=='newgame':\n self.Game=Game(self.Display)\n break\n elif o==\"Exit\":\n output = self.Game.popup()\n if output == 'resume':\n self.Game.GameBoard.display()\n continue\n else:\n running = True\n break", "def play_game():\n # Display board.\n display_board()\n # While game is still going.\n while game_still_going:\n # Handle a single turn of an arbitrary player.\n handle_turn(current_player)\n # Flip to another player.\n flip_player()\n # Check weather game is over or not.\n check_if_game_over()", "def start(self):\n self.save_checkpoint(\"setup\")\n\n logging.info(\"Starting game...\")\n body = render_message(\n \"welcome.html\",\n game_name=self.name,\n night_end=self.night_end.strftime(\"%I:%M %p\"),\n day_end=self.day_end.strftime(\"%I:%M %p\"),\n players=self.game.players,\n )\n self.send_message(mafia.events.PUBLIC, \"%s: Start\" % self.name, body)\n self.game.begin()\n self.started = True\n\n self.save_checkpoint(\"start\")", "def run_game(self):\n while True:\n self._check_events()\n self._update_rain()\n self._update_screen()", "def play():\n\tprint(\"Welcome to TIC TAC TOE!\")\n\tboard, player_mark, message, turn_counter = initialize_game();\n\twhile player_mark != \"GG\":\n\t\tdisplay_game(board, message)\n\t\trow,col = get_coordinates()\n\t\tboard, player_mark, turn_counter = update_game(board, row, col, player_mark, turn_counter)\n\t\tplayer_mark, message = check_status(board, player_mark, turn_counter)\n\telse:\n\t\tdisplay_game(board, message)", "def game_main_loop():\n game_quit = False\n\n while not game_quit:\n #TODO get player input\n events_list = pygame.event.get()\n\n #TODO process input\n for event in events_list:\n if event.type == pygame.QUIT:\n game_quit = True\n draw_game()\n\n pygame.quit()\n exit()", "def callback_game_loop(self) -> None:\n self._goal_generate()\n self._update()\n self.reset()\n\n while self._player != self._goal:\n self._update()\n action = self._action_callback(\n self._player.np,\n self._goal.np,\n *self._action_callback_args,\n )\n if action == \"QUIT\":\n break\n self._player_erase()\n self.FUNCMAP[action]()\n self._update()\n\n if self._display:\n time.sleep(0.1)\n try:\n if chr(cv2.waitKey(5)) in self.KEYMAP[\"QUIT\"]:\n break\n except ValueError:\n pass\n\n if self._display:\n print(f\"Steps taken: {self._routes[self._current_route_key]}\")\n\n if self._display:\n cv2.waitKey(0)", "def main():\n if \"cli\" in sys.argv:\n run_cli_game()\n else:\n run_gui_game()", "def run_game(self):\n game = Poker()\n AI_win = game.play_round(self.name)\n self.update_scores(AI_win)\n message = 'Would you like to play another round? 
Y(es) or N(o): '\n answer = InputHandler.input_bool(message)\n if answer:\n self.run_game()", "def main():\n pygame.init()\n\n try:\n filename = sys.argv[1]\n except IndexError:\n usage()\n\n game = Game.from_file(filename)\n grid = game.get_grid()\n width, height = grid.get_width(), grid.get_height()\n win = pygame.display.set_mode((width*CELL_SIZE, height*CELL_SIZE))\n selected = 0 # default selected player\n select_player(selected)\n render(win, grid)\n\n while not (game.winning() or game.losing()):\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP:\n selected = handle_click_event(grid, selected)\n\n elif event.type == pygame.KEYUP and event.key in KEY_DIRECTIONS:\n game.next_step(selected, KEY_DIRECTIONS[event.key])\n render(win, grid)\n\n # QUIT\n elif event.type == pygame.QUIT:\n pygame.quit()\n\n if game.winning():\n text = \"You win!\"\n game_status = \"win\"\n elif game.losing():\n text = \"You lose!\"\n game_status = \"lose\"\n\n display_end_screen(win, text, game_status)", "def run(self):\n\n # Initialise the pygame display.\n pygame.init()\n pygame.mixer.init()\n self.renderer.initialise()\n\n # Create the game systems.\n self.entity_manager.register_component_system(physics.Physics())\n self.entity_manager.register_component_system(systems.FollowsTrackedSystem())\n self.entity_manager.register_component_system(systems.TrackingSystem())\n self.entity_manager.register_component_system(systems.LaunchesFightersSystem())\n self.entity_manager.register_component_system(systems.KillOnTimerSystem())\n self.entity_manager.register_component_system(systems.PowerSystem())\n self.entity_manager.register_component_system(systems.ShieldSystem())\n self.entity_manager.register_component_system(systems.TextSystem())\n self.entity_manager.register_component_system(systems.AnimSystem())\n self.entity_manager.register_component_system(systems.ThrusterSystem())\n self.entity_manager.register_component_system(systems.ThrustersSystem())\n self.entity_manager.register_component_system(systems.WaveSpawnerSystem())\n self.entity_manager.register_component_system(systems.CameraSystem())\n self.entity_manager.register_component_system(systems.TurretSystem())\n self.entity_manager.register_component_system(systems.TurretsSystem())\n self.entity_manager.register_component_system(systems.WeaponSystem())\n\n # Preload certain images.\n self.resource_loader.preload()\n\n # Make the camera.\n camera = self.entity_manager.create_entity_with(components.Camera,\n components.Body,\n components.Tracking,\n components.FollowsTracked)\n camera.get_component(components.FollowsTracked).follow_type = \"instant\"\n\n # Draw debug info if requested.\n self.game_services.debug_level = self.config.get_or_default(\"debug\", 0)\n\n # Make the player\n player = self.entity_manager.create_entity(\"player.txt\")\n camera.get_component(components.Tracking).tracked.entity = player\n\n # Create a view to pass to the input handling - this lets it map between\n # world and screen coordinates.\n view = drawing.CameraView(self.renderer, camera)\n\n # Make the input handling system.\n self.input_handling = input_handling.InputHandling(view, self.game_services)\n\n # Create the wave spawner.\n if not self.config.get_or_default(\"peaceful_mode\", False):\n self.entity_manager.register_component_system(systems.WaveSpawnerSystem())\n\n # Make it so that bullets can damage things.\n self.entity_manager.get_system(physics.Physics).add_collision_handler(\n DamageCollisionHandler()\n )\n\n # Set the scrolling 
background.\n self.drawing.set_background(\"res/images/857-tileable-classic-nebula-space-patterns/6.jpg\")\n\n # Run the game loop.\n self.running = True\n fps = 60\n clock = pygame.time.Clock()\n tick_time = 1.0/fps\n while self.running:\n\n # Has a load been requested?\n if self.want_load:\n self.entity_manager.load(open(\"space_game.save\", \"r\"))\n self.want_load = False\n\n ## Create any queued objects\n self.entity_manager.create_queued_objects()\n\n # If a pause has been scheduled then pause the game.\n if self.want_pause:\n self.want_pause = False\n self.entity_manager.pause()\n\n # If an unpause has been scheduled then unpause the game.\n if self.want_resume:\n self.want_resume = False\n self.entity_manager.unpause()\n\n # If a step has been scheduled then advance a frame and schedule a\n # pause.\n if self.want_step:\n self.entity_manager.unpause()\n self.want_pause = True\n self.want_step = False\n\n # Input\n for e in pygame.event.get():\n response = self.input_handling.handle_input(e)\n if response.quit_requested:\n self.running = False\n\n # Update the systems.\n self.entity_manager.update(tick_time)\n\n # Draw\n self.renderer.pre_render(view)\n self.drawing.draw(view)\n self.renderer.post_render()\n self.renderer.flip_buffers()\n\n # Maintain frame rate.\n clock.tick(fps)\n\n # Remember how long the frame took.\n limited_fps = 1.0/(clock.get_time() / 1000.0)\n raw_fps = 1.0/(clock.get_rawtime() / 1000.0)\n time_ratio = (1.0/fps) / (clock.get_time()/1000.0)\n self.game_services.info.update_framerate(limited_fps,\n raw_fps,\n time_ratio)\n\n # Finalise\n pygame.quit()", "def play(self):\n if not self.active:\n return\n game_info = {\n 'partisans': self.partisans,\n 'swing': self.swing,\n 'media': self.media,\n 'news': self.news,\n 'mojo': self.mojo,\n 'hype': self.hype,\n 'money': self.money,\n 'cash': self.cash,\n 'cards': self.hand.cards,\n\n 'opp_partisans': self.opponent.partisans,\n 'opp_swing': self.opponent.swing,\n 'opp_media': self.opponent.media,\n 'opp_news': self.opponent.news,\n 'opp_mojo': self.opponent.mojo,\n 'opp_hype': self.opponent.hype,\n 'opp_money': self.opponent.money,\n 'opp_cash': self.opponent.cash,\n 'opp_cards': self.opponent.hand.cards\n }\n\n # print('################')\n # print('opp cards:')\n # for card in game_info['opp_cards']:\n # print(card)\n # print('----------------')\n # print('bots cards:')\n # for card in game_info['cards']:\n # print(card)\n # print('################')\n card, action = self.analysis(game_info)\n\n if action == TO_PRESS:\n card.use()\n elif action == TO_DROP:\n card.drop()", "def run(self):\n\n \"\"\"Call this function before trying to play any video with\n play_segment() or play().\n \"\"\"\n print(\"Task 2 assigned to thread: {}\".format(threading.current_thread().name))\n print(\"ID of process running task 2: {}\".format(os.getpid()))\n\n # If we don't use the MainLoop, messages are never sent.\n def start():\n print(\"Task 3 assigned to thread: {}\".format(threading.current_thread().name))\n print(\"ID of process running task 3: {}\".format(os.getpid()))\n print('====================> Using MainLoop\\n')\n loop = GLib.MainLoop()\n loop.run()\n \n \n print('====================> Starting a new thread for the player\\n')\n t = threading.Thread(target=start, name='thread_player')\n t.start()\n #_thread.start_new_thread(start, ())", "def GAMEOVER_LOOP():\n pass", "def main():\n dealCards().mainloop()", "def basic_begin_game(game_context) :\n game_context.world.set_game_defined()\n execute_context(game_context)", "def 
game_loop(self):\n while self.playing:\n self.handle_events()\n if self.START_KEY:\n self.playing = False\n self.display.fill(self.bg_color)\n self.breakout_loop()\n self.screen.blit(self.display, (0,0))\n pygame.display.update()\n self.reset_keys()", "def run_game():\n pygame.init()\n init_settings = Settings()\n\n screen = pygame.display.set_mode(\n (init_settings.screen_width, init_settings.screen_height)\n )\n pygame.display.set_caption(\"Alien Invasion\")\n\n ship = Ship(init_settings, screen)\n\n bullets = Group()\n\n \"\"\" Start Game \"\"\"\n while True:\n \"\"\" listening mouse and key events \"\"\"\n h.check_events(init_settings, screen, ship, bullets)\n ship.update()\n h.update_bullets(bullets)\n h.update_screen(init_settings, screen, ship, bullets)", "def start():\r\n introduction()\r\n score = duck_shooting1()\r\n dogs()\r\n play_again(score)", "def main():\n number_of_players = get_number_of_players()\n number_of_decks = get_number_of_decks()\n game_data = setup_game(number_of_players)\n\n player_list = game_data[0]\n play_shoe = game_data[2]\n play_dealer = game_data[1]\n play_again = True\n\n while play_again:\n replay = play_game(play_shoe, player_list, play_dealer, number_of_decks)\n if replay:\n play_shoe = replay[1]\n else:\n play_again = False\n \n print(\"Thanks for playing\")", "def play_game(self):\n self.welcome()\n while (self.winner is None) and (not self.exit_flag) and (not self.board.full()):\n self.play_round()\n self.exit_game()", "def run_game(self):\n\t\twhile True:\n\t\t\tself._check_event()\n\t\t\tself.ship.update()\n\t\t\tself._update_bullets()\n\t\t\tself._update_aliens()\n\t\t\tself._update_screen()", "def main(argv):\n config_options = parse_config(CONFIG_FILE_NAME)\n arguments_options = parse_args(argv, **config_options)\n playgame.main(arguments_options)", "def start(self):\n pygame.init()\n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"PyStroke\")\n self.engines = [GameEngine(self.screen, self.e_e)] # add others here\n self.engine = self.engines[0]\n self.run()", "def run_server(self, GameState):\n pass", "def play(self):\n if self.quit:\n return\n game_type= self.game_mode[0]\n difficulty= self.game_mode[1]\n ini = time()\n self.board = [[0]+[Piece(i%2,1) for i in range(self.board_size-2)]+[0]]+[[Piece(i%2,0)]+[0 for i in range(self.board_size-2)]+[Piece(i%2,0)] for i in range(self.board_size-2)]+[[0]+[Piece(i%2,1) for i in range(self.board_size-2)]+[0]]\n self.buttons = [[i for i in range(self.board_size)] for j in range(self.board_size)]\n self.selected = None\n self.active_player = 0\n self.quit = False\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n self.buttons[i][j] = pygame.draw.rect(self.window, ((i+j)%2*255, (i+j)%2*255, (i+j)%2*255), (20+j*100, 20+i*100, 100, 100))\n self.draw()\n if game_type==1:\n self.play_pvp()\n elif game_type==2:\n self.play_pvc(difficulty)\n elif game_type==3:\n difficulty2 = self.game_mode[2]\n self.play_cvc(difficulty, difficulty2)", "def play_game(self):\r\n\r\n print('Welcome to a game of Concentration!!')\r\n if self.who_goes_first():\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n\r\n while True:\r\n if self.match:\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n self.check_game_end()", "def main():\n # Initialize the event manager.\n event_manager = events.EventManager()\n AppState.get_state().set_event_manager(event_manager)\n\n # Initialize and register the application heartbeat.\n heart_beat = 
HeartBeat()\n event_manager.register_listener(heart_beat)\n\n # Initialize and register the world.\n basic_experiment = experiment.basic.BasicExperiment()\n world = basic_experiment.get_world()\n event_manager.register_listener(world)\n AppState.get_state().set_world(world)\n\n # Initialize pygame.\n surface = init()\n\n # Initialize and register the view.\n main_view = view.View(surface)\n event_manager.register_listener(main_view)\n\n # Initialize and register the controller.\n main_controller = controller.Controller()\n event_manager.register_listener(main_controller)\n\n # Start the heartbeat.\n heart_beat.run()", "def play(self):\n\n #Call the superclass play\n return super().play()", "def run(self):\n r = self.engine.run()\n while r != QUIT_FLAG:\n if r == SWITCH_FLAG:\n if self.engines.index(self.engine) < len(self.engines) - 1:\n self.engine = self.engines[self.engines.index(self.engine) + 1]\n print self.engines.index(self.engine)\n self.e_e.reset_input()\n else:\n self.engine = self.engines[0]\n r = self.engine.run()\n pygame.quit()\n raise SystemExit", "def begin(self):\n # Add the close listener.\n engine.Engine.input_handler.add_listener(engine.CloseOnEscapeOrQuit())\n # Play the game music.\n engine.Engine.stop_sound(\"MenuMusic\")\n engine.Engine.play_sound(\"GameMusic\", -1, 50)", "async def main(self):\n\n # Fetching the game memory\n memory = await self._read_memory()\n\n if not memory:\n log.warn(\"Could not find GD. Program will wait until it is found.\")\n gdsb.wait_for_gd()\n return\n\n if memory.is_in_level():\n\n if memory.get_level_id() != self.previous_level_id:\n await self.seal_embed()\n\n # Fetch gamestate data\n self.current_level = await self._fetch_level_info()\n \n # Gets wether or not the level being played is the exact same level that was played just previously, so that it will continue to use the same embed\n if self.previous_level_id != self.current_level.id:\n\n self.embed_message = None\n self.previous_embed_message = None\n\n self.session.start_attempts = self.current_level.attempts\n self.session.old_best = self.current_level.best_percent\n self.session.best = 0\n\n else:\n\n if self.previous_embed_message is not None:\n self.embed_message = self.previous_embed_message\n\n self.previous_level_id = self.current_level.id\n\n # So for some reason, the main levels all have their creator blank, so we just set it to RobTop\n if self.current_level.creator == \"\":\n self.current_level.creator = \"RobTop\"\n\n # Getting if you are playing a main level or not. If so, we have to manually set the difficulty using the list I made earlier\n if self.current_level.id in range(1,22):\n \n self.current_level.difficulty = const.MAIN_LEVEL_DIFFICULTIES[self.current_level.id-1]\n\n # Checks if the player is in practice mode or not. 
If they are, it will display a different color\n if self.current_level.is_practice_mode():\n title = \"Practicing: {0}\"\n color = discord.Color.from_rgb(59, 223, 245)\n else:\n title = \"Playing: {0}\"\n color = discord.Color.from_rgb(18, 219, 31)\n\n # A few little extra texts that go next to the title\n extra_text = \"\"\n if self.current_level.percent == 100:\n\n if self.current_level.is_practice_mode():\n extra_text=\" - PRACTICE COMPLETE!\"\n else:\n extra_text=\" - LEVEL COMPLETE!\"\n color = discord.Color.from_rgb(237, 220, 28)\n\n elif self.current_level.best_percent > self.session.old_best:\n extra_text = \" - New Best!\"\n self.session.old_best = self.current_level.best_percent\n\n # Saving the best percent of the session\n if self.current_level.percent > self.session.best and not self.current_level.is_practice_mode():\n self.session.best = self.current_level.percent\n\n # Calculating the current attempts on a level\n self.current_level.attempts = (self.current_level.attempts - self.session.start_attempts) + 1\n\n rating_text = self._get_rating_text()\n category = self._get_category_text()\n\n self.embed.title = title.format(self.current_level.name)\n self.embed.description = f\"By {' | '.join((self.current_level.creator, rating_text, category))}\"\n self.embed.color = color\n\n self.embed.set_thumbnail(url=const.FACES[const.DIFFICULTIES.index(self.current_level.difficulty)])\n\n # Getting user\n user = self.bot.get_user(conf.user)\n\n self.embed.set_author(name=user.display_name, icon_url=user.avatar_url)\n\n progress_bar_state = self._get_progress_bar(self.current_level.percent)\n\n fields = (\n {\"name\": \"Attempt:\", \"value\": self.current_level.attempts, \"inline\": True},\n {\"name\": \"Best %:\", \"value\": f\"{self.current_level.best_percent}%\", \"inline\": True},\n {\"name\": \"Current Progress:\", \"value\": f\"{self.current_level.percent}%{extra_text}\\n{progress_bar_state}\", \"inline\": False}\n )\n\n for i, field in enumerate(fields):\n\n if len(self.embed.fields) < len(fields):\n self.embed.add_field(**field)\n else:\n self.embed.set_field_at(i, **field)\n \n self.embed.set_footer(text=\"Level ID: {0}\".format(self.current_level.id))\n \n # Sending embed\n\n channel = self.bot.get_channel(conf.channel)\n\n if not channel:\n log.error(f\"Could not find channel with id: {conf.channel}. Use '{conf.prefix}set_channel' to set the channel.\")\n else:\n #If the channel is found, edit the message the embed has been sent to, and if it dosent exist, create it.\n if self.embed_message is None:\n self.embed_message = await channel.send(embed=self.embed)\n else:\n await self.embed_message.edit(embed=self.embed)\n \n else:\n\n if memory:\n await self.seal_embed()\n\n #Sets some globals so that the embed can be reused if the same level is played again\n self.previous_embed_message = self.embed_message\n self.embed_message = None" ]
[ "0.8527575", "0.8128602", "0.8080839", "0.79423046", "0.79204637", "0.7907992", "0.78166646", "0.77512056", "0.7606996", "0.7596167", "0.75618863", "0.75584793", "0.7554812", "0.75141007", "0.7511485", "0.7502417", "0.7497473", "0.7493972", "0.74235296", "0.7420933", "0.73897415", "0.73809963", "0.730338", "0.72898436", "0.7287278", "0.7249723", "0.7249492", "0.7210004", "0.7115387", "0.71152985", "0.71030873", "0.7089231", "0.7048706", "0.70324147", "0.7027987", "0.701519", "0.70141226", "0.6989864", "0.6965902", "0.69587743", "0.69551677", "0.69369304", "0.6927066", "0.68982613", "0.68836117", "0.6861602", "0.6851814", "0.6849584", "0.68354946", "0.6828787", "0.682083", "0.6819464", "0.681055", "0.6802844", "0.6799692", "0.6782125", "0.6779408", "0.6769257", "0.67571807", "0.6755253", "0.6748268", "0.6748245", "0.6735681", "0.67347175", "0.6710233", "0.6703638", "0.6684194", "0.6673532", "0.6673503", "0.66731256", "0.66707015", "0.6654229", "0.6641506", "0.66368234", "0.66210926", "0.65952855", "0.65868044", "0.65841055", "0.65821624", "0.6578963", "0.6573979", "0.6568317", "0.6563424", "0.6554411", "0.6552059", "0.6551492", "0.6546865", "0.6543853", "0.65327114", "0.65265775", "0.6524234", "0.6516643", "0.65156615", "0.65080416", "0.65041417", "0.6503536", "0.6496739", "0.6495825", "0.6491831", "0.6488676" ]
0.6931555
42
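The three fields that close each record appear to fit together in a consistent way: negative_scores lists a model score for each negative in order, document_score is the score given to the positive document, and document_rank (42 here) lines up with the number of listed negative scores that exceed 0.6931555, i.e. the 0-based position the positive would take if merged into the negatives sorted by descending score. That reading is inferred from the values themselves rather than stated anywhere in the dump, so the helper below is only a minimal sketch under that assumption; the function name is chosen for illustration, and it casts the values because the dump stores scores as strings.

def rank_document(document_score, negative_scores):
    # Number of negatives that score strictly higher than the positive
    # document, i.e. its 0-based rank when all scores are sorted in
    # descending order. The dump stores scores as strings, so cast first.
    score = float(document_score)
    return sum(1 for s in negative_scores if float(s) > score)

# For the record above, rank_document("0.6931555", negative_scores) returns 42,
# in line with the document_rank field shown.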
Retrieve, update or delete a code snippet.
def student_detail(request, pk):
    try:
        students = student.objects.get(pk=pk)
    except student.DoesNotExist:
        return HttpResponse(status=404)

    if request.method == 'GET':
        serializer = studentSerializer(students)
        return JsonResponse(serializer.data)

    elif request.method == 'PUT':
        data = JSONParser().parse(request)
        serializer = studentSerializer(students, data=data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data)
        return JsonResponse(serializer.errors, status=400)

    elif request.method == 'DELETE':
        students.delete()
        return HttpResponse(status=204)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def snippet_detail(request, pk):\n try:\n snippet = Snippet.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = SnippetSerializer(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = SnippetSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def snippet_detail(request, pk):\n try:\n snippet = Snippet.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = SnippetSerializer(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = SnippetSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def snippet_detail(request, pk, format=None):\r\n try:\r\n snippet = Snippet.objects.get(pk=pk)\r\n except Snippet.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == 'GET':\r\n serializer = SnippetSerializer(snippet)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'PUT':\r\n serializer = SnippetSerializer(snippet, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n elif request.method == 'DELETE':\r\n snippet.delete()\r\n return Response(status=status.HTTP_204_NO_CONTENT)", "def snippet(self) -> global___Snippet:", "def get(name):\n #retrieve the snippet from the db - commnet from session of nicole darcy\n #i added the 'cursor= ' line because it said it was unused code, copied it from def put()\n# commenting lines below to replace with new code as per class lesson\n # cursor=connection.cursor()\n # row = cursor.fetchone()\n # connection.commit()\n with connection, connection.cursor() as cursor:\n cursor.execute(\"select message from snippets where keyword=%s\", (name,))\n row = cursor.fetchone()\n if not row:\n #No snippet was found with that name.\n return \"404: Snippet not Found\"\n return row[0]\n \n # warning for 'unreachable code' so i commented it out...\n # logging.error(\"FIXME: Unimplemented - get({!r})\".format(name))\n # print(\"this function is running\",get.__name__)\n # return \"\"", "def snippet_detail(request, pk):\n try:\n snippet = Quickstart.objects.get(pk=pk)\n except Quickstart.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = QuickstartSerializer(snippet)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = QuickstartSerializer(snippet, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n quickstart.delete()\n return HttpResponse(status=204)", "def snippet(self, source, *args, **kwds):\n tables = {\"directives\" : self.directives, **self.tables, **kwds}\n return 
c_snippet(source, *args, **tables)", "def put(name, snippet):\n # commenting the 3 lines below in order to try and refactor the put method to use context managers\n # logging.info(\"Storing snippet {!r}: {!r}\".format(name, snippet))\n # cursor = connection.cursor()\n # command = \"insert into snippets values (%s, %s)\"\n with connection, connection.cursor() as cursor:\n cursor.execute(\"store snippets where values=%s\", (snippet,))\n\n \n try:\n command = \"insert into snippets values (%s, %s)\"\n cursor.execute(command, (name, snippet))\n except psycopg2.IntegrityError as e:\n connection.rollback()\n command = \"update snippets set message=% where keyword=%s\"\n cursor.execute(command, (snippet, name))\n connection.commit()\n logging.debug(\"Snippet stored successfully.\")\n return name, snippet", "def get_snippet(res_type, snippet_name):\n\treturn get_settings_resource(res_type, snippet_name, 'snippets');", "def snippet_delete(request, snippet_id=None):\n snippet_id = snippet_id or request.POST.get('snippet_id')\n if not snippet_id:\n raise Http404('No snippet id given')\n snippet = get_object_or_404(Snippet, secret_id=snippet_id)\n snippet.delete()\n return HttpResponseRedirect(reverse('snippet_new'))", "def pysnippet(self, code, lineno=1, colno=None, extralines=3, line_numbers=True):\n with self._lock:\n if not code:\n return\n if colno is not None:\n highlight_columns = (colno - 1, colno)\n else:\n highlight_columns = None\n _lineno = max(0, lineno - extralines)\n\n highlighter = PythonHighlighter()\n\n self.snippet(\n code,\n (_lineno, _lineno + extralines * 2 + 1),\n highlight_line=lineno,\n highlight_columns=highlight_columns,\n line_numbers=True,\n highlighter=highlighter,\n )\n return self", "def snippet_detail_apiview(request, pk, format=None):\n try:\n snippet = Snippet.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = SnippetSerializer(snippet, context={'request': request})\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = SnippetSerializer(snippet, data=request.data, context={'request': request})\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "async def snippet(command, app):\n response = Message()\n response[\"channel\"] = command[\"channel_id\"]\n response[\"unfurl_links\"] = False\n\n response[\"text\"] = (\n \"Please use the snippet feature, or backticks, when sharing code. 
\\n\"\n \"To include a snippet, click the :paperclip: on the left and hover over \"\n \"`Create new...` then select `Code or text snippet`.\\n\"\n \"By wrapping the text/code with backticks (`) you get:\\n\"\n \"`text formatted like this`\\n\"\n \"By wrapping a multiple line block with three backticks (```) you can get:\\n\"\n )\n\n await app.plugins[\"slack\"].api.query(url=methods.CHAT_POST_MESSAGE, data=response)\n\n response[\"text\"] = (\n \"```\\n\"\n \"A multiline codeblock\\nwhich is great for short snippets!\\n\"\n \"```\\n\"\n \"For more information on snippets, click \"\n \"<https://get.slack.help/hc/en-us/articles/204145658-Create-a-snippet|here>.\\n\"\n \"For more information on inline code formatting with backticks click \"\n \"<https://get.slack.help/hc/en-us/articles/202288908-Format-your-messages#inline-code|here>.\"\n )\n\n await app.plugins[\"slack\"].api.query(url=methods.CHAT_POST_MESSAGE, data=response)", "def snippet_details(request, snippet_id, template_name='dpaste/snippet_details.html', is_raw=False):\n snippet = get_object_or_404(Snippet, secret_id=snippet_id)\n\n # One time snippet get deleted if the view count matches our limit\n if snippet.expire_type == Snippet.EXPIRE_ONETIME \\\n and snippet.view_count >= ONETIME_LIMIT:\n snippet.delete()\n raise Http404()\n\n # Increase the view count of the snippet\n snippet.view_count += 1\n snippet.save()\n\n tree = snippet.get_root()\n tree = tree.get_descendants(include_self=True)\n\n new_snippet_initial = {\n 'content': snippet.content,\n 'lexer': snippet.lexer,\n }\n\n if request.method == \"POST\":\n snippet_form = SnippetForm(\n data=request.POST,\n request=request,\n initial=new_snippet_initial)\n if snippet_form.is_valid():\n new_snippet = snippet_form.save(parent=snippet)\n url = new_snippet.get_absolute_url()\n return HttpResponseRedirect(url)\n else:\n snippet_form = SnippetForm(\n initial=new_snippet_initial,\n request=request)\n\n template_context = {\n 'snippet_form': snippet_form,\n 'snippet': snippet,\n 'lexers': LEXER_LIST,\n 'lines': range(snippet.get_linecount()),\n 'tree': tree,\n 'wordwrap': snippet.lexer in LEXER_WORDWRAP and 'True' or 'False',\n }\n\n response = render_to_response(\n template_name,\n template_context,\n RequestContext(request)\n )\n\n if is_raw:\n response['Content-Type'] = 'text/plain;charset=UTF-8'\n response['X-Content-Type-Options'] = 'nosniff'\n return response\n else:\n return response", "def edit_snippet(request, snippet_id):\n snippet = get_object_or_404(Snippet,\n pk=snippet_id,\n author__pk=request.user.id)\n if request.method == 'POST':\n form = forms.EditSnippetForm(request.POST, instance=snippet)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(snippet.get_absolute_url())\n else:\n form = forms.EditSnippetForm(instance=snippet)\n return render_to_response('cab/edit_snippet_form.html',\n { 'form': form,\n 'original': snippet },\n context_instance=RequestContext(request))", "def snippet_detail(request, snippet_id):\n snippet = get_object_or_404(Snippet, pk=snippet_id)\n return render_to_response('cab/snippet_detail.html',\n { 'object': snippet,\n 'num_ratings': snippet.rating_set.count(),\n 'rating_score': Rating.objects.score_for_snippet(snippet.id) },\n context_instance=RequestContext(request))", "def get(name):\n # cursor = connection.cursor()\n # command = \"select message from snippets where keyword='%s'\"%name\n # cursor.execute(command)\n # connection.commit()\n # message_tuple=cursor.fetchone()\n \n with connection, connection.cursor() as 
cursor:\n cursor.execute(\"select message from snippets where keyword=%s\", (name,))\n row = cursor.fetchone()\n \n if len(row)>0:\n logging.debug(\"Get Snippet successfully.\")\n return row[0]\n else:\n logging.debug(\"No Snippet got\")\n return \"\"", "def snippet_detail_csrf(request, pk, format=None):\n try:\n snippet = Snippet.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = SnippetSerializer(snippet, context={'request': request})\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = SnippetSerializer(snippet, data=data, context={'request': request})\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def update(self, instance: Snippet, validated_data: dict) -> Snippet:\n instance.title = validated_data.get('title', default=instance.title)\n instance.code = validated_data.get('code', default=instance.code)\n instance.language = validated_data.get('language', default=instance.language)\n instance.style = validated_data.get('style', default=instance.style)\n instance.save()\n return instance", "def add_snippet(request):\n original_id = request.GET.get('oid', None)\n \n if request.method == 'POST':\n form = forms.AddSnippetForm(request.POST)\n if form.is_valid():\n new_snippet = form.save(commit=False)\n new_snippet.author = request.user\n if original_id:\n new_snippet.original_id = original_id\n new_snippet.save()\n return HttpResponseRedirect(new_snippet.get_absolute_url())\n else:\n form = forms.AddSnippetForm()\n return render_to_response('cab/add_snippet_form.html',\n { 'form': form },\n context_instance=RequestContext(request))", "def visit_snippet(self, node):\n lang = node[\"language\"]\n linenos = node.get('linenos', False)\n fname = node['filename']\n highlight_args = node.get('highlight_args', {})\n if 'language' in node:\n # code-block directives\n lang = node['language']\n highlight_args['force'] = True\n if 'linenos' in node:\n linenos = node['linenos']\n\n def warner(msg):\n self.builder.warn(msg, (self.builder.current_docname, node.line))\n\n highlighted = self.highlighter.highlight_block(node.rawsource, lang,\n warn=warner,\n linenos=linenos,\n **highlight_args)\n starttag = self.starttag(node, 'div', suffix='',\n CLASS='highlight-%s snippet' % lang)\n self.body.append(starttag)\n self.body.append('<div class=\"snippet-filename\">%s</div>\\n''' % (fname,))\n self.body.append(highlighted)\n self.body.append('</div>\\n')\n raise nodes.SkipNode", "def visit_snippet_literal(self, node):\n self.visit_literal_block(node)", "def __push_snippets(self, snippet):\n if not self.__postgre_db.is_in_table(\"snippets\", \"snippet=\" + add_quotes(\n replace_special_characters(snippet))):\n self.__postgre_db.insert(\"snippets\", {\"snippet\": snippet})", "def update_code(self):\n print ('update code')\n self.query_dict.update({'code':code.value})", "def snippetDetail(requeset, pk, format = None):", "def getItemByCode(self, code):\n try:\n PATH = os.path.dirname(os.path.realpath(__file__))\n DATABASE = os.path.join(PATH, '..', 'db', 'store.db')\n conn = sqlite3.connect(DATABASE)\n c = conn.cursor()\n c.execute('SELECT * FROM ITEMS WHERE CODE=?', (code,))\n row = c.fetchone()\n while True:\n if row == None:\n 
return None\n else:\n return row\n except sqlite3.Error as e:\n print(\"An error occurred while retrieving Item: \", e.args[0])\n return None\n finally:\n c.close()\n conn.close()", "async def fetch_github_snippet(session, repo, path, start_line, end_line):\n\n headers = {'Accept': 'application/vnd.github.v3.raw'}\n if \"GITHUB_TOKEN\" in os.environ:\n headers['Authorization'] = f'token {os.environ[\"GITHUB_TOKEN\"]}'\n\n refs = (await fetch_http(session, f'https://api.github.com/repos/{repo}/branches', 'json', headers=headers) +\n await fetch_http(session, f'https://api.github.com/repos/{repo}/tags', 'json', headers=headers))\n\n ref = path.split('/')[0]\n file_path = '/'.join(path.split('/')[1:])\n for possible_ref in refs:\n if path.startswith(possible_ref['name'] + '/'):\n ref = possible_ref['name']\n file_path = path[len(ref) + 1:]\n break\n\n file_contents = await fetch_http(\n session,\n f'https://api.github.com/repos/{repo}/contents/{file_path}?ref={ref}',\n 'text',\n headers=headers,\n )\n\n return await snippet_to_embed(file_contents, file_path, start_line, end_line)", "def xmlsnippet(self, code, lineno=1, colno=None, extralines=3, line_numbers=True):\n with self._lock:\n if not code:\n return\n if colno is not None:\n highlight_columns = (colno - 1, colno)\n else:\n highlight_columns = None\n _lineno = max(0, lineno - extralines)\n\n self.snippet(\n code,\n (_lineno, _lineno + extralines * 2 + 1),\n highlight_line=lineno,\n highlight_columns=highlight_columns,\n line_numbers=True,\n )\n return self", "async def fetch_bitbucket_snippet(session, repo, ref, file_path, start_line, end_line):\n\n file_contents = await fetch_http(\n session,\n f'https://bitbucket.org/{quote_plus(repo)}/raw/{quote_plus(ref)}/{quote_plus(file_path)}',\n 'text',\n )\n\n return await snippet_to_embed(file_contents, file_path, start_line, end_line)", "def add_one_code(writer, entry, next_id):\n q_id = entry['Id']\n snippets = get_possible_snippets(q_id, True)\n if not snippets is None:\n for code, lang, ans_id in snippets:\n writer.add_document(question_id=q_id, answer_id=ans_id, code=return_unicode(code), language=return_unicode(lang),code_id=return_unicode(next_id))\n CR_DOCS_DB.insert({\"question_id\": q_id, \"answer_id\": ans_id, \"code\": return_unicode(code), \"language\": return_unicode(lang), \"code_id\": return_unicode(next_id)})\n print return_unicode(next_id)\n return 1\n return 0", "def snippet_list(request):\n if request.method == 'GET':\n snippets = Snippet.objects.all()\n serializer = SnippetSerializer(snippets, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = SnippetSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def snippet_list(request):\n if request.method == 'GET':\n snippets = Snippet.objects.all()\n serializer = SnippetSerializer(snippets, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = SnippetSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def get_snippet(self, title=None):\n for snippet in self.snippets:\n if snippet[\"title\"] == title:\n return snippet\n return None", "def getitem(self, code):\n\n fetch = 
self._collection.find_one({'code':code})\n\n return fetch", "async def fetch_github_gist_snippet(session, gist_id, revision, file_path, start_line, end_line):\n\n headers = {'Accept': 'application/vnd.github.v3.raw'}\n if \"GITHUB_TOKEN\" in os.environ:\n headers['Authorization'] = f'token {os.environ[\"GITHUB_TOKEN\"]}'\n\n gist_json = await fetch_http(\n session,\n f'https://api.github.com/gists/{gist_id}{f\"/{revision}\" if len(revision) > 0 else \"\"}',\n 'json',\n headers=headers,\n )\n\n for gist_file in gist_json['files']:\n if file_path == gist_file.lower().replace('.', '-'):\n file_contents = await fetch_http(\n session,\n gist_json['files'][gist_file]['raw_url'],\n 'text',\n )\n\n return await snippet_to_embed(file_contents, gist_file, start_line, end_line)\n\n return ''", "def add_code(self, doc):\n\t\timport os\n\t\tfrom webnotes.modules import scrub, get_module_path\n\t\timport conf\n\t\t\n\t\tmodules_path = get_module_path(doc.module)\n\n\t\tpath = os.path.join(modules_path, 'doctype', scrub(doc.name))\n\n\t\tdef _add_code(fname, fieldname):\n\t\t\tfpath = os.path.join(path, fname)\n\t\t\tif os.path.exists(fpath):\n\t\t\t\twith open(fpath, 'r') as f:\n\t\t\t\t\tdoc.fields[fieldname] = f.read()\n\t\t\t\n\t\t_add_code(scrub(doc.name) + '.js', '__js')\n\t\t_add_code(scrub(doc.name) + '.css', '__css')\n\t\t_add_code('%s_list.js' % scrub(doc.name), '__listjs')\n\t\t_add_code('help.md', 'description')\n\t\t\n\t\t# embed all require files\n\t\timport re\n\t\tdef _sub(match):\n\t\t\tfpath = os.path.join(os.path.dirname(conf.modules_path), \\\n\t\t\t\tre.search('[\"\\'][^\"\\']*[\"\\']', match.group(0)).group(0)[1:-1])\n\t\t\tif os.path.exists(fpath):\n\t\t\t\twith open(fpath, 'r') as f:\n\t\t\t\t\treturn '\\n' + f.read() + '\\n'\n\t\t\telse:\n\t\t\t\treturn '\\n// no file \"%s\" found \\n' % fpath\n\t\t\n\t\tif doc.fields.get('__js'):\n\t\t\tdoc.fields['__js'] = re.sub('(wn.require\\([^\\)]*.)', _sub, doc.fields['__js'])\n\t\t\n\t\t# custom script\n\t\tfrom webnotes.model.code import get_custom_script\n\t\tcustom = get_custom_script(doc.name, 'Client') or ''\n\t\tdoc.fields['__js'] = doc.fields.setdefault('__js', '') + '\\n' + custom", "def restore_object(self, attrs, instance=None):\n if instance:\n # Update existing instance\n instance.title = attrs.get('title', instance.title)\n instance.code = attrs.get('code', instance.code)\n instance.value = attrs.get('value', instance.value)\n return instance\n\n return Snippet(**attrs)", "def edit(self, new_content: str) -> None:\n\n # YOUR CODE HERE\n self.content = new_content", "def download(request, snippet_id):\n snippet = get_object_or_404(Snippet, pk=snippet_id)\n response = HttpResponse(snippet.code, mimetype='text/plain')\n response['Content-Disposition'] = 'attachment; filename=%s.%s' % (snippet.id,\n snippet.language.file_extension)\n response['Content-Type'] = snippet.language.mime_type\n return response", "def save(self, *args, **kwargs):\n if not self.id:\n self.created = timezone.now()\n\n self.modified = timezone.now()\n\n return super(CodeSnippet, self).save(*args, **kwargs)", "def signature(self) -> global___SnippetSignature:", "def _load_snippet(filename) -> str:\n fullpath = f'{dirname(__file__)}/js/{filename}'\n file = open(fullpath, 'r')\n script = file.read()\n file.close()\n return script", "def save(self, *args, **kwargs):\n\t lexer = get_lexer_by_name(self.language)\n\t linenos = self.linenos and 'table' or False\n\t options = self.title and {'title': self.title} or {}\n\t formatter = HtmlFormatter(style=self.style, 
linenos=linenos,\n\t full=True, **options)\n\t self.highlighted = highlight(self.code, lexer, formatter)\n\t super(Snippet, self).save(*args, **kwargs)", "def create_snippet(self, doc, spdx_id):\n self.reset_snippet()\n spdx_id = spdx_id.split('#')[-1]\n if validations.validate_snippet_spdx_id(spdx_id):\n doc.add_snippet(snippet.Snippet(spdx_id=spdx_id))\n self.snippet_spdx_id_set = True\n return True\n else:\n raise SPDXValueError('Snippet::SnippetSPDXID')", "def set_snippet_comment(self, doc, comment):\n self.assert_snippet_exists()\n if not self.snippet_comment_set:\n self.snippet_comment_set = True\n if validations.validate_snip_comment(comment):\n doc.snippet[-1].comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('Snippet::SnippetComment')\n else:\n raise CardinalityError('Snippet::SnippetComment')", "def save(self, *args, **kwargs):\n lexer = get_lexer_by_name(self.language)\n linenos = self.linenos and 'table' or False\n options = self.title and {'title': self.title} or {}\n formatter = HtmlFormatter(style=self.style, linenos=linenos,\n full=True, **options)\n self.highlighted = highlight(self.code, lexer, formatter)\n super(Snippet, self).save(*args, **kwargs)", "def save(self, *args, **kwargs):\n lexer = get_lexer_by_name(self.language)\n linenos = self.linenos and 'table' or False\n options = self.title and {'title': self.title} or {}\n formatter = HtmlFormatter(style=self.style, linenos=linenos,\n full=True, **options)\n self.highlighted = highlight(self.code, lexer, formatter)\n super(Snippet, self).save(*args, **kwargs)", "def dis_lite(self, code, address):\n dis_gen = self.cs.disasm_lite(code, address)\n return dis_gen", "def pull_code(notebook):\n cells = notebook[\"cells\"]\n code = []\n for cell in cells:\n if cell[\"cell_type\"] == \"code\":\n code.extend(cell[\"source\"] + [\"\\n\"])\n return ''.join(code)", "def update_code(self, new_code):\n\n new_code = self.code", "def snippet_new(request, template_name='dpaste/snippet_new.html'):\n if request.method == \"POST\":\n snippet_form = SnippetForm(data=request.POST, request=request)\n if snippet_form.is_valid():\n new_snippet = snippet_form.save()\n url = new_snippet.get_absolute_url()\n return HttpResponseRedirect(url)\n else:\n snippet_form = SnippetForm(request=request)\n\n template_context = {\n 'snippet_form': snippet_form,\n 'lexer_list': LEXER_LIST,\n 'is_new': True,\n }\n\n return render_to_response(\n template_name,\n template_context,\n RequestContext(request)\n )", "def save(self, *args, **kwargs):\n\n self.html_text = htmlize(self.text, self.language)\n super(Snippet, self).save(*args, **kwargs)", "def snippet_list(request, format=None): # format=None: 데이터 형태에 대한 포맷정보가 붙는다.\r\n if request.method == 'GET':\r\n snippets = Snippet.objects.all()\r\n serializer = SnippetSerializer(snippets, many=True)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'POST':\r\n serializer = SnippetSerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def snippet_gist(request, snippet_id): # pragma: no cover\n snippet = get_object_or_404(Snippet, secret_id=snippet_id)\n data = {\n 'description': getattr(settings, 'DPASTE_DEFAULT_GIST_DESCRIPTION', ''),\n 'public': False,\n 'files': {\n getattr(settings, 'DPASTE_DEFAULT_GIST_NAME', 'dpaste.de_snippet.py'): {\n 'content': snippet.content,\n }\n }\n }\n\n try:\n payload 
= json.dumps(data)\n response = requests.post('https://api.github.com/gists', data=payload)\n response_dict = response.json()\n gist_url = response_dict.get('html_url')\n\n # Github could be down, could return invalid JSON, it's rare\n except:\n return HttpResponse('Creating a Github Gist failed. Sorry, please go back and try again.')\n\n return HttpResponseRedirect(gist_url)", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code", "def test_render_snippet_id(self):\n template = SnippetTemplateFactory(code='<p>{{ snippet_id }}</p>')\n eq_(template.render({'myvar': 'foo'}), '<p>0</p>')", "def examine_document(self, snippet):\n\n if self.documents_examined < len(snippet):\n snippet = snippet[self.documents_examined]\n screen = self.driver.perform_action((20, snippet))\n self.documents_examined = self.documents_examined + 1\n\n return screen\n else:\n self.driver.perform_action((00, None))\n return None", "def edit(id):\n r = requests.get(API_ROUTE + '/' + str(id), headers={'Auth': _auth()})\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n return render_template('editor.html', article=r.json())", "def books_patch_delete(request, pk):\n try:\n snippet = Books.objects.get(url=pk)\n except Books.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'PUT':\n serializer = BooksSerializers(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def loadCodeFromFile():\n global notes_text\n\n notes_text.delete(\"1.0\", END)\n load_interface = Tk()\n load_interface.filename = filedialog.askopenfilename( initialdir = (\"../Templates\") ,title = \"Select file\",filetypes = ((\"Bit Tune File\",\"*.btu\"),(\"All Files\",\"*.*\")))\n load_interface.destroy()\n\n with open (load_interface.filename, 'r') as f:\n code = f.read()\n notes_text.insert(END, str(code))", "def code():", "def update_code(self, new_code):\n self.code = new_code\n\n # Fill in the rest", "def get_code(path):\n if path is None:\n return pasteboard.get()\n else:\n with open(path, 'r', encoding='UTF-8') as html_file:\n code = html_file.read()\n return code", "def __init__(self):\n if not os.path.isfile(self.DBFILE):\n self.snippets = {}\n return\n\n with open(self.DBFILE) as fobj:\n content = fobj.read()\n if not content.strip():\n content = \"{}\"\n\n self.snippets = {\n name: Snippet(name, data)\n for name, data in json.loads(content).items()\n }", "async def fetch_gitlab_snippet(session, repo, path, start_line, end_line):\n\n headers = {}\n if 'GITLAB_TOKEN' in os.environ:\n headers['PRIVATE-TOKEN'] = os.environ['GITLAB_TOKEN']\n\n enc_repo = quote_plus(repo)\n\n refs = (await fetch_http(session, f'https://gitlab.com/api/v4/projects/{enc_repo}/repository/branches', 'json', headers=headers) +\n await fetch_http(session, f'https://gitlab.com/api/v4/projects/{enc_repo}/repository/tags', 'json', headers=headers))\n\n ref = path.split('/')[0]\n file_path = '/'.join(path.split('/')[1:])\n for possible_ref in refs:\n if path.startswith(possible_ref['name'] + '/'):\n ref = possible_ref['name']\n file_path = path[len(ref) + 1:]\n break\n\n enc_ref = quote_plus(ref)\n enc_file_path = quote_plus(file_path)\n\n file_contents = await 
fetch_http(\n session,\n f'https://gitlab.com/api/v4/projects/{enc_repo}/repository/files/{enc_file_path}/raw?ref={enc_ref}',\n 'text',\n headers=headers,\n )\n\n return await snippet_to_embed(file_contents, file_path, start_line, end_line)", "def create_snippet(data, baseurl, timeout, raw):\n try:\n url = baseurl + \"/documents\"\n response = requests.post(url, data.encode('utf-8'), timeout=float(timeout))\n except requests.exceptions.Timeout:\n exit(\"Error: connection timed out\")\n\n dockey = json.loads(response.text)['key']\n return baseurl + (\"/raw/\" if raw else \"/\") + dockey", "def edit(ctx, docid, password):\n coll = db.get_document_collection(ctx)\n config = ctx.obj[\"config\"]\n\n doc, docid = db.get_document_by_id(ctx, docid)\n title = doc[\"title\"]\n\n template, c = db.get_content(ctx, doc, password=password)\n\n content, tmpfile = utils.get_content_from_editor(config[\"editor\"], template=template)\n d = datetime.datetime.now()\n\n if doc[\"encrypted\"] is True:\n title = utils.get_title_from_content(content)\n content = c.encrypt_content(content.decode(\"utf-8\").encode(\"utf-8\"))\n else:\n if not \"links\" in doc[\"categories\"]:\n title = utils.get_title_from_content(content)\n\n if isinstance(template, unicode):\n content = content.decode(\"utf-8\")\n\n if content != template:\n doc[\"content\"] = content\n doc[\"title\"] = title\n doc[\"updated\"] = d\n if validate(doc):\n coll.save(doc)\n else:\n utils.log_error(\"Validation of the updated object did not succeed\")\n\n transaction.log(ctx, docid, \"edit\", title)\n utils.log_info(\"Document \\\"%s\\\" updated.\" % title)\n else:\n utils.log_info(\"No changes detected for \\\"%s\\\"\" % title)\n\n utils.clean_tmpfile(tmpfile)\n\n return True", "def write(self):\n #t.run_program('code editor.html')\n last_article = io.read('.last_article')\n t.run_program(f'code \"{last_article}\"')", "async def run(self, ctx: commands.Context, *, codeblock: str):\n matches = self.regex.findall(codeblock)\n if not matches:\n return await ctx.reply(embed=discord.Embed(title=\"Uh-oh\", description=\"Couldn't quite see your codeblock\"))\n lang = matches[0][0] or matches[0][1]\n if not lang:\n return await ctx.reply(embed=discord.Embed(title=\"Uh-oh\",\n description=\"Couldn't find the language hinted in the codeblock or before it\"))\n code = matches[0][2]\n result = await self._run_code(lang=lang, code=code)\n\n await self._send_result(ctx, result)", "def runcode(self, code):\n if not self.locals.get('autocommit', None):\n return self.locals['db'].transact(code.InteractiveConsole.runcode, self, code)\n return code.InteractiveConsole.runcode(self, code)", "def is_snippet(abbr, doc_type = 'html'):\n\treturn get_snippet(doc_type, abbr) and True or False", "def delete_book(code: str):\n pass", "def insert_hide_code(note):\n txt = open(PATH_UTILS + 'hide_code.py', 'r', encoding='utf-8').read()\n note['cells'] += [nb.v4.new_code_cell(txt)]\n note.cells[-1].metadata = {\"editable\": False, \"deletable\": False, \"tags\": ['run_start']}\n return", "def code(self) -> str:\n return pulumi.get(self, \"code\")", "def code(self) -> str:\n return pulumi.get(self, \"code\")", "def code(self) -> str:\n return pulumi.get(self, \"code\")", "def update(snippet_original, filename):\n## THIS IS APPENDING, NOT REPLACING\n\tlogging.info(\"Searching for {} in {}\".format(snippet_original, filename))\n\tlogging.debug(\"Opening file\")\n\twith open(filename, \"r+\") as f:\n\t\treader = csv.reader(f)\n\t\twriter = csv.writer(f)\n\t\tlogging.debug(\"Searching 
for '{}'\".format(snippet_original))\n\t\tin_file = False\n\t\tfor row in reader:\n\t\t\tif str(row[1]) == snippet_original:\n\t\t\t\tin_file = True\n\t\t\t\tprint row\n\t\t\t\tnew_text = raw_input(\"Insert new snippet text: \")\n\t\t\t\trow = writer.writerow([str(row[0]), new_text])\n\t\t\t\tprint row\n\t\tif in_file == False:\n\t\t\tprint \"That's not in this file\"\n\tlogging.debug(\"Search complete\")\n\treturn snippet_original, filename", "def edit(self, new_content: object, reason: str = \"\") -> None:\n raise NotImplementedError", "def update_code(self, new_code):\n\n self.code = new_code", "def update_code(self, new_code):\n\n self.code = new_code", "def update(self,newCode,start,end):\r\n\t\t\"\"\" if 'showOnlySelection' is false displays all the code \"\"\"\r\n\t\tstartLine = 0\r\n\r\n\t\tif(self.showOnlySelection):\r\n\t\t\tstartLine = start\r\n\t\t\t(begCode,newCode,endCode) = Splitter().divideFile(newCode,start,end)\t\t\r\n\t\t\t\r\n\t\tself.loadCode(newCode,startLine)", "def _get_code_object(self, cursor):\n raise NotImplementedError", "def get_code():\n client = MongoClient()\n wgapl = client[\"wgapl\"]\n code = wgapl[\"code\"]\n return code", "def books_detail(request, pk):\n try:\n snippet = Books.objects.get(url=pk)\n except Books.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = BooksSerializers(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = BooksSerializers(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def prepare_code_snippet(file_path: str, line_no: int, context_lines_count: int = 5) -> str:\n with open(file_path) as text_file:\n # Highlight code\n code = text_file.read()\n code_lines = code.splitlines()\n # Prepend line number\n code_lines = [\n f\">{lno:3} | {line}\" if line_no == lno else f\"{lno:4} | {line}\"\n for lno, line in enumerate(code_lines, 1)\n ]\n # # Cut out the snippet\n start_line_no = max(0, line_no - context_lines_count - 1)\n end_line_no = line_no + context_lines_count\n code_lines = code_lines[start_line_no:end_line_no]\n # Join lines\n code = \"\\n\".join(code_lines)\n return code", "def edit_document():", "def snippet_list(request):\n if request.method == 'GET':\n quickstart = Quickstart.objects.all()\n serializer = QuickstartSerializer(snippets, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = QuickstartSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)", "def __edit_line(self, line, code, code_obj): # pylint: disable=R0201\r\n try:\r\n result = eval(code_obj, globals(), locals())\r\n except TypeError as ex:\r\n message = \"failed to execute {}: {}\".format(code, ex)\r\n logger.warning(message)\r\n raise EditorError(message)\r\n if result is None:\r\n raise EditorError(\"cannot process line '{}' with {}\".format(\r\n line, code))\r\n elif isinstance(result, list) or isinstance(result, tuple):\r\n line = ' '.join([str(res_element) for res_element in result])\r\n else:\r\n line = str(result)\r\n return line", "def code(self, code: str):\n\n self._code = code", "def 
create(self, validated_data: dict) -> Snippet:\n return Snippet.objects.create(**validated_data)", "def get(self, key):\n return self.code_table[key]", "def submission_update_description(request, submission_pk):\n try:\n submission = models.CompetitionSubmission.objects.get(pk=submission_pk)\n if submission.participant.user != request.user:\n raise Http404()\n submission.description = request.POST.get('updated_description')\n submission.save()\n return HttpResponse()\n except models.CompetitionSubmission.DoesNotExist:\n raise Http404()", "def code(self, value: str) -> None:\n self._code = value", "def code(self, code):\n\n self._code = code", "def depart_snippet_literal(self, node):\n self.depart_literal_block(node)", "def test_get_success(self):\n create_snippet('foo')\n create_snippet('bar')\n response = self.get()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 2)\n self.assertEqual(response.data[0]['content'], 'foo')\n self.assertEqual(response.data[1]['content'], 'bar')", "def code(self) -> Optional[str]:\n return pulumi.get(self, \"code\")", "def code(self) -> Optional[str]:\n return pulumi.get(self, \"code\")", "def code(self) -> Optional[str]:\n return pulumi.get(self, \"code\")", "def code(self) -> Optional[str]:\n return pulumi.get(self, \"code\")" ]
[ "0.6433584", "0.6433584", "0.6343745", "0.62817955", "0.62216586", "0.6008872", "0.59082586", "0.5902986", "0.58660686", "0.581822", "0.5809916", "0.5748296", "0.57467324", "0.57114553", "0.56778467", "0.55851805", "0.5559712", "0.54972357", "0.54327506", "0.5384471", "0.531", "0.5266601", "0.52218527", "0.51400083", "0.50740457", "0.5037137", "0.50359654", "0.5005701", "0.5000395", "0.49750924", "0.49670255", "0.49670255", "0.4958587", "0.49236798", "0.48946002", "0.48851275", "0.48793894", "0.48660585", "0.4843616", "0.48435974", "0.4824824", "0.48228562", "0.48153177", "0.48041427", "0.47856203", "0.47821397", "0.47821397", "0.47806287", "0.4752017", "0.4742307", "0.47389045", "0.47276902", "0.47264257", "0.4714342", "0.47093868", "0.47093868", "0.4707073", "0.47051102", "0.47037885", "0.46833292", "0.46711305", "0.46573433", "0.4646005", "0.46422383", "0.46365497", "0.46296966", "0.46194664", "0.4617428", "0.46113014", "0.4606486", "0.46057937", "0.45963457", "0.458981", "0.45852393", "0.45725027", "0.45725027", "0.45725027", "0.45718348", "0.45672023", "0.45625457", "0.45625457", "0.45586812", "0.45583436", "0.4553432", "0.45517397", "0.45302442", "0.4528275", "0.4526875", "0.45207936", "0.4519466", "0.45065457", "0.45001864", "0.44955692", "0.4491771", "0.44911695", "0.44797397", "0.4471295", "0.4468568", "0.4468568", "0.4468568", "0.4468568" ]
0.0
-1
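Here the positive document carries a score of 0.0 and a rank of -1, which presumably means it was never scored against its negatives; that is an inference from the values, not something the dump states. What the metadata does state is which fields form the training triplet, so a consumer can read the pairing from metadata.objective.triplet instead of hard-coding field names. The loader below is a minimal sketch under the assumption that the records are stored as JSON lines carrying the field names seen in these records; the file layout and the function name are illustrative assumptions, not part of the dataset.

import json

def iter_triplets(path):
    # Yield (anchor, positive, negatives) tuples, letting each record's
    # metadata.objective.triplet entries name the fields that make up the
    # triplet (here: "query", "document", "negatives").
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)
            for fields in record["metadata"]["objective"]["triplet"]:
                anchor_field, positive_field, negatives_field = fields
                yield (record[anchor_field],
                       record[positive_field],
                       record[negatives_field])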
Loads a JSON value from a file and converts it to the corresponding Python object.
def loadJsonValueFromFile(inputFilePath):
    with open(inputFilePath) as fileObj:
        value = json.load(fileObj)
    return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(cls, file):\n with open(file, \"r\") as f:\n j = json.load(f)\n return cls(**j)", "def load_from_json_file(filename):\n with open(filename, 'r') as f:\n obj = json.loads(f.read())\n return obj", "def convert_json_to_object(file_content):\n object = json.loads(file_content)\n print(object)\n return object", "def from_file(filename):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n f = open(filename, 'r')\n string = f.read()\n return json.loads(string)", "def from_JSON(cls, filename):\n with open(os.path.expanduser(filename), encoding='utf-8') as f:\n return json.load(f, object_hook=class_hook)", "def load_from_json_file(filename):\n with open(filename, 'r') as jFile:\n fString = jFile.read()\n fObj = json.loads(fString)\n return fObj", "def load_from_json(file):\n with open(file, 'r') as f:\n return json.load(f)", "def json_loader(filename):\n\n with open(filename, \"r\", encoding=\"UTF-8\") as source:\n data = json.load(source, object_hook=object_decode)\n return data", "def json_load(file_path):\n\n with open(file_path) as f:\n return json_loads(f.read())", "def loadFromFile(self, filename):\n with open(filename, 'r') as file:\n raw_data = file.read()\n # data = json.loads(raw_data, encoding='utf-8') # python 3.9 suppression de encoding\n try:\n data = json.loads(raw_data)\n self.deserialize(data)\n self.has_been_modified = False\n except json.JSONDecodeError:\n raise InvalidFile(f'{os.path.basename(filename)} is not a valid JSON file')\n except Exception as e:\n dumpException(e)", "def from_file(cls, file_name):\n\n with open(file_name, 'r') as fi:\n the_dict = json.load(fi)\n return cls.from_dict(the_dict)", "def load(self):\n\n # if the file doesn't exist, return\n if not os.path.exists(self.filepath):\n return\n\n # open the file and read in the raw values\n with open(self.filepath, 'r', encoding='utf-8') as fh:\n raw_values = json.loads(fh.read())\n\n # don't implicitly trust the raw values, but only get known keys\n for key in self.variables:\n if key in raw_values and 'value' in raw_values[key]:\n raw_value = raw_values[key]['value']\n self.variables[key]['value'] = raw_value", "def load_json(filename):\n with open(filename) as file:\n obj = json.load(file)\n return obj", "def load_json(filepath: str):\n with open(filepath, \"r\", encoding=\"utf8\") as f:\n return json.loads(f.read())", "def from_file(cls, file_name: str):\n\n with open(file_name, 'r') as fi:\n input_dict = json.load(fi)\n return cls.from_dict(input_dict)", "def load_from_json_file(filename):\n if type(filename) is not str:\n return\n\n with open(filename, mode=\"r\") as file:\n return json.loads(file.read())", "def _localloadjson(path: str) -> JSONType:\n with open(path, encoding=\"utf-8\") as fh:\n return json.load(fh)", "def JSONtoObject(fileName):\n # TODO: ensure file exists first!!\n \n with open(fileName) as json_data:\n d = json.load(json_data)\n \n return d\n #return json.loads(d, object_hook=_json_object_hook)", "def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))", "def from_file(cls: Type[T], file_path: path_t) -> T:\n file_path = Path(file_path)\n _assert_file_extension(file_path, \".json\")\n\n with file_path.open(encoding=\"utf-8\") as fp:\n json_dict = json.load(fp)\n return cls(**json_dict)", "def load_from_json_file(filename):\n with open(filename, encoding=\"utf-8\") as round:\n return 
json.load(round)", "def _json_from_file(file: IO[AnyStr]) -> Json:\n if os.path.getsize(file.name) > 0:\n return typing.cast(Json, json.load(file))\n return {}", "def load_json(filepath: str):\n with open(filepath, encoding=\"utf-8\") as f:\n return json.load(f)", "def from_json(fname):\n with open(fname, 'r') as fh:\n d = json.load(fh)\n return d", "def read_json(file_or_path):\n try:\n with (open(file_or_path, 'r') if isinstance(file_or_path, (str, bytes)) else file_or_path) as f:\n obj = json.load(f)\n except IOError:\n obj = json.loads(file_or_path)\n return obj", "def load_json(fpath):\n with open(fpath, \"r\") as f:\n return json.load(f)", "def load_from_json_file(filename):\n with open(filename, \"r\") as my_file:\n return json.loads(my_file.read())", "def load_from_json_file(filename):\n with open(filename, mode=\"r\", encoding=\"utf-8\") as a_file:\n return json.loads(a_file.read())", "def load(self, filename):\n _json = self.read_json(filename, byteify=True)\n _json = self._byteify(_json, ignore_dicts=True)\n if not _json:\n return None\n _dict = {k : self._parse_value(v) for k, v in _json.items()}\n return _dict", "def from_json(path: str):\n with open(path) as f:\n return json.load(f)", "def loadJsonFromFile(filename):\n with open(filename) as f:\n return json.loads(f.read())", "def from_json(cls, fname):\n d = read_json(fname)\n return cls.from_dict(d)", "def load(cls, path):\n\n with open(path) as f:\n d = json.load(f, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def load(cls, path):\n\n with open(path) as f:\n d = json.load(f, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)", "def load(self, path):\n with open(path, \"rt\") as open_file:\n data = json.load(open_file)\n return data", "def load_json_data(filepath):\n with open(filepath,'r') as f:\n return json.load(f)", "def load_from_json_file(filename):\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n return(json.loads(f.read()))", "def read_json(fpath):\n with open(fpath, 'r') as f:\n obj = json.load(f)\n return obj", "def json_load(fp):\n with _iotools.open_file(fp, \"r\") as f:\n return json.load(f, cls=DataDecoder)", "def read_object_from_file(file_name):\n if os.path.exists(file_name) is False:\n print (\"Error read path: [%s]\" % file_name)\n return None\n with open(file_name, 'r') as f:\n try:\n obj = json.load(f)\n except Exception:\n print (\"Error json: [%s]\" % f.read()[0:10])\n return None\n return obj", "def load(filename):\n\n try:\n with open(filename) as data:\n return json.load(data)\n except:\n return None", "def load(file, **kwargs):\n extension = os.path.splitext(file)\n if extension not in {'.json', '.axjson'}:\n raise RuntimeError('Given extension ({}) not supported'.format(extension))\n with open(file) as f:\n data = json.load(f)\n if extension == '.json':\n return data\n else:\n json_str = json.dumps(data)\n return loads(json_str, **kwargs)", "def from_path(cls, path: str):\n with open(path) as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def from_json_file(cls, json_file):\r\n with open(json_file, \"r\", encoding='utf-8') as reader:\r\n text = reader.read()\r\n return cls.from_dict(json.loads(text))", "def load_json(filepath: str):\n if not filepath:\n return None\n\n abs_path = _resolve_relative_path(filepath)\n with open(abs_path) as f:\n raw_json = f.read()\n\n return 
json.loads(raw_json)", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def load_json(filename):\n\n with open(filename, encoding=constants.load_encoding) as file:\n return loads(file.read())", "def load_json(file_name):\n return json.load(open(file_name))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, 'r', encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def load_from_json_file(filename):\n with open(filename) as f:\n return json.load(f)", "def load(fp, *args, **kwargs): \n state = json.load(fp, *args, **kwargs)\n return unserialize(state)", "def util_load_json(path):\n with io.open(path, mode=\"r\", encoding=\"utf-8\") as f:\n return json.loads(f.read())", "def loadJSONFile(filename):\n\twith open(filename, 'r') as f:\n\t\treturn json.loads(f.read())", "def load_json(filename_or_dict):\n\tif isinstance(filename_or_dict, str):\n\t\tinput_file = open(filename_or_dict, encoding='utf-8')\n\t\tjson_dict = json.loads(input_file.read())\n\t\tinput_file.close()\n\t\treturn json_dict\n\treturn filename_or_dict", "def load_from_json_file(filename):\n with open(filename, 'r', encoding='utf8') as f:\n return json.load(f)", "def load_from_json(filename):\n\n with open(filename, 'r') as file:\n return json.load(file)", "def from_json_file(cls, json_file):\n with open(json_file, \"r\") as reader:\n return cls.from_dict(json.load(reader))", "def load_from_json_file(filename):\n import json\n with open(filename, 'r') as s:\n return json.load(s)", "def load_from_json_file(filename):\n\n jason_str = \"\"\n\n with open(filename, 'r', encoding='utf-8') as f:\n jason_str = f.read()\n\n return json.loads(jason_str)", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def load_json_obj(path: str) -> RAW_CFG:\n with fsspec.open(path) as json_file:\n return json.load(json_file)", "def from_json(self, file_path):\n with open(file_path) as file:\n jsonstr = file.read()\n handler_dict = json.loads(jsonstr)\n self.from_dict(handler_dict)", "def read_json(fn):\n with open(fn) as f:\n return json.load(f, object_hook=_operator_object_hook)", "def load_json(file_path):\n\n \n with open(file_path, 'r') as json_file:\n # print(file_path)\n dictionary = json.loads(json_file.read())\n\n return dictionary", "def load_json_file(file_path: str):\n with open(file_path) as file:\n return json.loads(file.read())[0]", "def __load_class_representation(self, filename):\n\n # Reads in the reverse dictionary from the given file.\n with open(filename) as file:\n return json.load(file)", "def json_from_file(name):\n with open(name) as f_p:\n return json.load(f_p)", "def load_json(value):\n try:\n return json.loads(value)\n except json.JSONDecodeError as e:\n raise JSONDecodeError(e) from e", "def load_from_file(config_path):\n return load_json_file(config_path)", "def load(filepath):\n 
with open(filepath) as f:\n return Config(json.load(f))", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def load_from_json_file(filename):\n with open(filename, 'r') as f:\n json_string = json.load(f)\n return json_string", "def read_json(f):\n with open(f, \"r\") as fin:\n return json.loads(fin.read())", "def read_json_to_object(fn):\n\n with open(fn, \"r\") as fid:\n obj = json.load(fid, object_hook=lambda d: SimpleNamespace(**d))\n return obj", "def load(self):\n data = None\n try:\n with open(self.__filepath, 'r') as file:\n text = file.read()\n data = jsonpickle.decode(text)\n except FileNotFoundError:\n data = None\n except IOError as e:\n print(e)\n return data", "def load_from_json(self, json_fp: str):\n # TODO:\n pass", "def load_json_document(f):\n return json.load(f)", "def load_json_from_file(path):\n \n json_file = open(path, \"r\")\n json_string = json_file.read()\n json_file.close()\n return json.loads(json_string)", "def load_json(path: Path) -> Any:\n with path.open() as f:\n return json.load(f)", "def json_load(path):\n with open(path) as f:\n dictionary = json.load(f)\n return dictionary", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls(**json.loads(text))", "def import_json(self) -> dict:\n with open(self.path, encoding=\"utf8\") as json_file:\n return json.load(json_file)", "def openJson(self):\n json_file = open(self.file, 'r')\n json_data = json_file.read()\n result = json.loads(json_data)\n return result", "def _load_value(value):\n try:\n return json.loads(value, object_pairs_hook=OrderedDict)\n except ValueError as e: # pragma: no cover\n raise RuntimeError(f'Failed to JSON load data \"{value}\" ({e}).')", "def from_json(cls, filepath):\n #If file cannot be found, create an empty file and write an empty cls data into it.\n import os\n if (os.path.isfile(filepath) == False):\n empty_cls = cls()\n empty_cls.to_json(filepath)\n print('New Json Created: Type=', empty_cls.data['type'])\n return empty_cls\n\n #Open file and load load content\n with open(filepath, 'r') as fp:\n data = json.load(fp)\n print('Json Loaded: Type=', data['type'])\n\n assert data['type'] == cls.__name__ , \"Deserialized object type: %s is not equal to %s.\" % (data['type'] , cls.__name__)\n return cls.from_data(data)", "def _loadf(fname):\n with open(fname, encoding=\"ISO-8859-1\") as f:\n return json.load(f)", "def __load_json(self, path):\n try:\n with Path(path).open('r') as f:\n return json.load(f)\n except ValueError as ve:\n six.raise_from(ValueError(\"error while loading the fixture %s\" % path), ve)", "def load_json(jsonfile):\n with open(jsonfile) as f:\n return json.load(f)", "def load_json(jsonfile):\n with open(jsonfile) as f:\n return json.load(f)", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def _load(self, json_str, filepath):\n # pylint: disable=protected-access\n return self.json_o._load(json_str, filepath)", "def load_json(path):\n with open(path) as data_file:\n return json.load(data_file)", "def load_file(file_path):\n with gzip.open(file_path, \"rb\") as fp:\n return json.loads(fp.read().decode('utf-8'))", "def openFile(self, path):\n with open(path) as f:\n return json.load(f)" ]
[ "0.7691063", "0.76900375", "0.76079774", "0.7595927", "0.75809187", "0.7557681", "0.7495783", "0.74881333", "0.74459565", "0.7439162", "0.7416306", "0.7365671", "0.73619384", "0.7338387", "0.73190105", "0.7312332", "0.7281531", "0.7272493", "0.72698015", "0.7256082", "0.7254488", "0.72533065", "0.7246577", "0.72437817", "0.7240949", "0.7234397", "0.72303057", "0.7222472", "0.7212235", "0.7202836", "0.71775085", "0.7174268", "0.7170034", "0.7170034", "0.7154319", "0.7152203", "0.7128382", "0.7126119", "0.71207744", "0.7117334", "0.71124756", "0.7105718", "0.71018225", "0.7101471", "0.7101471", "0.7094588", "0.70854414", "0.70804876", "0.7072299", "0.7071448", "0.70711803", "0.70711803", "0.70711803", "0.7063832", "0.7063579", "0.70567864", "0.7037914", "0.70317596", "0.7031731", "0.7030542", "0.7029599", "0.7025081", "0.70229554", "0.7015381", "0.70028555", "0.6991316", "0.6987656", "0.69751173", "0.69687045", "0.6963819", "0.6961607", "0.695886", "0.6958499", "0.6954693", "0.69535375", "0.69420904", "0.69420904", "0.6937808", "0.6929803", "0.69129854", "0.69085264", "0.6906315", "0.6904852", "0.689427", "0.6883665", "0.6874198", "0.6863079", "0.6860725", "0.68520457", "0.6842705", "0.6839489", "0.6833684", "0.6828487", "0.6823859", "0.6823859", "0.68235046", "0.68167365", "0.6814144", "0.68071777", "0.6789793" ]
0.74380505
10
Initializes turtle instance for turtle game.
def initialize(turtle_shape, bg_color, turtle_color, turtle_speed): turtle_instance = turtle.Turtle() turtle_instance.shape(turtle_shape) turtle.bgcolor(bg_color) turtle_instance.color(turtle_color) turtle_instance.speed(turtle_speed) return turtle_instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_turtle():\n turtle.up()\n turtle.home()", "def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.commands = commands\n self._pc = 0\n self._loop_stack = []\n self._variables = {'x':0, 'y':0}", "def init():\n turtle.setworldcoordinates(-WINDOW_WIDTH / 2, -WINDOW_WIDTH / 2,\n WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)\n\n turtle.up()\n turtle.setheading(0)\n turtle.title('squares')\n pass", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def _prepare_turtle():\n turtle.setup(width=screen_width)\n turtle.shape(turtle_shape)\n turtle.title(title)", "def __init__(self, level, treasures, maze_size):\n turtle.Turtle.__init__(self)\n self.shape(\"player_right.gif\")\n self.color(\"blue\")\n self.penup()\n self.pensize(1)\n self.speed(0)\n self.score = 0\n self.level = level\n self.treasures = treasures\n self.maze_size = maze_size\n self.end_writer = writers.EndWriter(maze_size)\n\n turtle.Screen().onkey(self.go_left, \"Left\")\n turtle.Screen().onkey(self.go_right, \"Right\")\n turtle.Screen().onkey(self.go_up, \"Up\")\n turtle.Screen().onkey(self.go_down, \"Down\")\n turtle.Screen().onkey(self.find_path, \"f\")", "def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer", "def __init__(self, pos=(0, 0)):\n super().__init__() # Call 'turtle' initiation\n self.penup() # Stop displaying trail\n self.shapesize(stretch_wid=1, stretch_len=1) # Set dimensions of ball object to same height and width\n self.color(\"white\") # Set colour to white\n self.shape(\"circle\") # Set ball shape to round\n self.setpos(pos) # Move ball to desired position on screen\n self.x_dir = 1 # Set ball horizontal movement to right\n self.y_dir = 1 # Set ball vertical movement to up", "def __init__(self, _pendown=1, gridmode=False, gridsize=50, homeX = 50 + 25 + 5, homeY = 50 + 25 + 5, canvWidth = 400, canvHeight = 200, \\\n turtleMainColor=\"#00A651\", turtleAccentColor=\"#FFF600\", speed = 5, rotspeed = 5, pencolor = 
'red', penwidth=3):\n self._turtleMainColor = turtleMainColor\n self._turtleAccentColor = turtleAccentColor\n self._speed = speed\n self._rotspeed = rotspeed\n self._pendown = _pendown\n self._pencolor = pencolor\n self._penwidth = penwidth\n self._rotation = 90\n self._gridsize = gridsize\n self._gridmode = gridmode\n \n if(gridmode and homeX == 80):\n homeX = 0\n homeY = 0\n \n self._x = homeX\n self._y = homeY\n self._homeX = homeX\n self._homeY = homeY\n \n self._canvWidth = canvWidth\n self._canvHeight = canvHeight\n self._actions = []\n self._levelDataString = [] \n \n self._walls = []\n self._lava = []\n \n self._appendCurrentState();", "def initialize_plotter(width, height, min_x, max_x, min_y, max_y):\n global x_begin, x_end, x_increment\n turtle.delay(0)\n x_begin, x_end = min_x, max_x\n turtle.setup(width=width, height=height)\n turtle.screensize(width, height)\n turtle.setworldcoordinates(min_x, min_y, max_x, max_y)\n x_increment = (max_x - min_x)/width\n turtle.hideturtle()\n turtle.pencolor('black')\n turtle.penup()\n turtle.setposition(min_x, 0)\n turtle.setheading(0)\n turtle.pendown()\n turtle.forward(max_x - min_x)\n turtle.penup()\n turtle.setposition(0, min_y)\n turtle.setheading(90)\n turtle.pendown()\n turtle.forward(max_y - min_y)", "def __init__(self):\r\n pen.up()\r\n pen.setheading(0)\r\n pen.hideturtle()\r\n turtle.title(\"My name\")\r\n pen.speed(0)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0", "def __init__(self):\n self.x = int(constants.SCREEN_WIDTH/2)\n self.y = int(constants.SCREEN_HEIGHT/2)\n self.DX = self.getRandSpeed()\n self.DY = self.getRandSpeed()\n self.RADIUS = 5", "def main():\n args = _argument_parsing()\n _prepare_turtle()\n _if_else_statement(args)\n turtle.mainloop()", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n try:\n self._turtle.shape('pen.gif')\n except:\n self._turtle.shape('classic')\n self._turtle.color('red')\n self.speed = 0\n \n #pair = self._turtle.color()\n self._pencolor = self._turtle.color()[0]\n self._fillcolor = self._turtle.color()[0]", "def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)", "def cool_turtle():\n # Make the TurtleWindow.\n window = rg.TurtleWindow()\n\n # Make the SimpleTurtle.\n cool_turtle = rg.SimpleTurtle('turtle')\n cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too\n cool_turtle.speed = 1 # Slow\n\n # Move the SimpleTurtle to her starting position.\n start_at = rg.Point(100, -50)\n cool_turtle.pen_up()\n cool_turtle.go_to(start_at)\n cool_turtle.pen_down()\n\n # Set up some parameters that control the nature of the shape drawn.\n size = 100 # Try 150 too\n angle = 1 # Try 20 too\n iterations = 360 # Try 90 too\n\n # Store the animation speed (to reset it later).\n tracer_n, tracer_d = window.tracer(), window.delay()\n\n # Make the animation go much faster.\n # First number: bigger means faster.\n # Second number: bigger means slower.\n window.tracer(5, 5)\n\n for _ in range(iterations):\n cool_turtle.right(angle)\n cool_turtle.draw_square(size)\n\n # Reset the animation to its original speed.\n window.tracer(tracer_n, tracer_d)\n\n window.close_on_mouse_click()", "def RobotInit():\n names = [\"Body\"]\n angles = [-0.038392066955566406, 0.1349501609802246, 1.1964781284332275, 
0.07512402534484863, -1.4926238059997559, -1.3391400575637817, 0.11500811576843262, 0.029999971389770508, -0.25766992568969727, -0.09506607055664062, -0.9694461822509766, 2.086198091506958, -1.168950080871582, 0.07367396354675293, -0.25766992568969727, 0.10128593444824219, -0.9342479705810547, 2.0663399696350098, -1.186300277709961, -0.07205605506896973, -0.309826135635376, 0.24233007431030273, 0.06131792068481445, 0.8544800281524658, 1.5983860492706299, 0.17799997329711914]\n fractionMaxSpeed = 0.1\n time.sleep(1)\n motion.setAngles(names, angles, fractionMaxSpeed)", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)", "def init():\n global tube, ball, faceTextureName, woodTextureName\n tube = gluNewQuadric()\n gluQuadricDrawStyle(tube, GLU_FILL)\n ball = gluNewQuadric()\n gluQuadricDrawStyle(ball, GLU_FILL)\n\n # Set up lighting and depth-test\n glEnable(GL_LIGHTING)\n glEnable(GL_NORMALIZE) # Inefficient...\n glEnable(GL_DEPTH_TEST) # For z-buffering!\n\n generateCheckerBoardTexture()\n faceTextureName = loadImageTexture(\"brick.jpg\")\n woodTextureName = loadImageTexture(\"wood.jpg\")", "def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()", "def turtle(self,turtleType):\n if self.turtleType == turtleType:\n return\n if self.turtleType and self.turtleType != PLAYER:\n self.mc.removeEntity(self.turtleId)\n self.turtleType = turtleType\n if turtleType == PLAYER:\n self.turtleId = None\n elif turtleType:\n self.turtleId = self.mc.spawnEntity(turtleType,\n self.position.x,self.position.y,self.position.z,\n \"{NoAI:1}\")\n self.setEntityCommands()\n self.positionOut()\n self.directionOut()", "def main():\r\n intialize()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()\r\n shapes()\r\n shapes2()\r\n print (\"Close the window\")\r\n turtle.done()", "def __init__(self):\n self._symbols = set()\n self._blank_symbol = None\n self._states = set()\n self._start_state = None\n self._end_states = set()\n self._transitions = {}\n\n self._current_state = None\n self._tape = None\n self._head = None", "def initialize_simulation(self) -> Simulation:\n pass", "def init(self):\n\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.x=0\n self.y=0", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.joys = initialize_all_gamepads()\n self.done = False\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.cannon = Turret(self.joys[0], (250,250))\n self.objects = pg.sprite.Group()", "def initialize(self):\n\n \"*** YOUR CODE HERE\"\n self.path = []\n MyAgent.customFood = None\n MyAgent.foodLeft = 0\n MyAgent.specialWalls = {}\n self.followOne = False\n if self.index == 0:\n MyAgent.finding = []\n MyAgent.finding.append(False)", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def setup(self):\n self.total_time = 0.0\n self.timer_text = None\n 
arcade.set_background_color(arcade.color.WHITE)", "def __init__(self):\n \n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToPlay'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def __init__(self):\n # Screen settings\n self.screen_width = 400\n self.screen_height = 300\n self.bg_color = (230, 230, 230)\n\n self.rocket_speed_factor= 1.5", "def __init__(self):\n #screen Settings\n self.screen_width = 1024\n self.screen_height = 768\n self.bg_color = (32, 32, 32)\n\n #rocket settings\n self.rocket_speed = 1\n\n #laser Settings\n self.laser_speed = 1.0\n self.laser_width = 3\n self.laser_height = 15\n self.laser_color = (0, 255, 255)\n self.lasers_allowed = 3", "def __init__(self, win):\n \n # draw the base shot of the launcher\n base = Circle(Point(0,0), 3)\n base.setFill(\"red\")\n base.setOutline(\"red\")\n base.draw(win)\n\n # save the window and create initial angle and velocity\n self.win = win\n self.angle = radians(45.0)\n self.vel = 40.0\n \n # create inital \"dummy\" arrow\n self.arrow = Line(Point(0,0), Point(0,0)).draw(win)\n # replace it with the correct arrow\n self.redraw()", "def create_options_for_test_runner(self):\n self.options = turtle.Turtle(\n verbosity=VERBOSITY_NORMAL,\n summary_mode=False,\n )", "def setup(self):\n self.star_list = arcade.SpriteList()\n\n for i in range(50):\n # Create snowflake instance\n singlestar = Singlestar()\n # Add snowflake to snowflake list\n self.star_list.append(singlestar)\n\n # Don't show the mouse pointer\n self.set_mouse_visible(False)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)", "def __init__(self):\r\n self.position=(0,0)\r\n self.direction = 0.\r\n self.speed = 0\r\n self.state = 0\r\n pass", "def shapes():\r\n turtle.up()\r\n turtle.forward(500)\r\n turtle.down()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()", "def _addTurtle(self,turt):\n assert (type(turt) == Turtle), \"Parameter %s is not a valid Turtle object\" % `turt`\n self._turtles.append(turt)", "def __init__(self):\n\n self.frameCount = 0\n self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "def terminal_init(self):\n pass", "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "def _setup(self, width=turtle._CFG[\"width\"], height=turtle._CFG[\"height\"],\n startx=turtle._CFG[\"leftright\"], starty=turtle._CFG[\"topbottom\"]):\n if not hasattr(self._root, \"set_geometry\"):\n return\n \n sw = self._root.win_width()\n sh = self._root.win_height()\n if isinstance(width, float) and 0 <= width <= 1:\n width = sw*width\n if startx is None:\n startx = (sw - width) / 2\n if isinstance(height, float) and 0 <= height <= 1:\n height = sh*height\n if starty is None:\n starty = (sh - height) / 2\n self._root.set_geometry(width, height, startx, starty)\n self.update()", "def main():\n # Your code here\n draw_graph(turtle, -500, -200, 0)", "def initialize(self):\n result = pygame.init()\n pygame.font.init()\n pygame.display.set_caption('gomoku TDD')\n self.screen = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n self.clock = pygame.time.Clock()\n self.smallfont = pygame.font.Font(None, 40)\n self.isinitialized = True", "def __init__(self):\n self.x_coord = default_init\n self.y_coord = default_init\n self._init_random_coord() # generating random coordinates\n self.x_speed = default_init\n self.y_speed = default_init\n self.degrees = 
default_init\n self.radius = ship_def_radius", "def __init__(self, win): \r\n\r\n # draw the base shot of the launcher \r\n base = Circle(Point(0,0), 3) \r\n base.setFill('red')\r\n base.setOutline('red')\r\n base.draw(win) \r\n\r\n # save the window and create initial angle and velocity\r\n self.win = win \r\n self.angle = radians(45.0)\r\n self.vel = 40.0 \r\n\r\n # create initial 'dummy' arrow \r\n self.arrow = Line(Point(0,0), Point(0, 0)).draw(win) \r\n # replace it with the correct arrow \r\n self.redraw()", "def initialize(self):\n self._setup_simulation_from_parameters()\n if \"orrb\" in self.constants.observation_providers:\n self._reset()\n self._goal = self._next_goal()\n self.update_goal_info()\n\n self.observer = self._build_observer()", "def __init__(self, shape):\n self.eyes = [(), ()]\n self.shape = shape\n self.state = 0\n self.new_frame()", "def __init__(self):\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep','GoToPlay'])\n\n self.rate = rospy.Rate(1) \n self.counter = 0", "def setUp(self):\n\n self.veh = Vehicle(0, 0)\n self.R = Random(seed)", "def shapes2():\r\n turtle.up()\r\n turtle.backward(100)\r\n turtle.left(270)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.backward(700)\r\n shapes()", "def __init__(self):\n\t\t# Screen size settings\n\t\t# Note that these values are commented out because we're using\n\t\t# full screen mode.\n\t\t#self.screen_width = 1200\n\t\t#self.screen_height = 600\n\n\t\t# Color definitions and background/color setting\n\t\tmidnight_blue = (0, 3, 36)\n\t\tblack = (0, 0, 0)\n\t\twhite = (255, 255, 255)\n\t\tself.bg_color = midnight_blue\n\n\t\tself.bg_image = pygame.image.load('images/space_bg.jpg')\n\n\t\t# Rocket settings\n\t\tself.max_speed = 3\n\t\tself.acceleration = 0.01\n\t\tself.rotation_speed = 3\n\t\t# Starts facing upwards\n\t\tself.rotation_angle = 271\n\n\t\t# Bullet settings\n\t\tself.bullet_speed = 8\n\t\tself.bullet_width = 3\n\t\tself.bullet_height = 15\n\t\tself.bullet_color = (60, 60, 60)\n\t\tself.bullets_allowed = 3", "def init():\n global balls, super_balls\n\n balls = [gen_ball() for _ in range(number_of_balls)]\n super_balls = []\n generate_velocity_all_balls()", "def __init__(self, max_step=-1):\n self.environment = mls.rl.common.Environment()\n self.environment.game = mls.rl.common.Game(max_step=max_step)\n self.environment.current_state = self.environment.game.init_state(self.environment)", "def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_name = initialize(turtle_shape, bg_color,\n turtle_color, turtle_speed)\n\n for i in range(36):\n for i in range(4):\n turtle_name.forward(200)\n turtle_name.right(90)\n turtle_name.right(10)", "def irma_setup():\n import tkinter\n turtle.setup(965, 600) # set size of window to size of map\n\n wn = turtle.Screen()\n wn.title(\"Hurricane Irma\")\n\n # kludge to get the map shown as a background image,\n # since wn.bgpic does not allow you to position the image\n canvas = wn.getcanvas()\n \n turtle.setworldcoordinates(-90, 0, -17.66, 45) # set the coordinate system to match lat/long\n\n map_bg_img = tkinter.PhotoImage(file=\"images/atlantic-basin.gif\")\n\n # additional kludge for positioning the background image\n # when setworldcoordinates is used\n canvas.create_image(-1175, -580, anchor=tkinter.NW, image=map_bg_img)\n\n \n t = turtle.Turtle()\n t.speed(1)\n wn.register_shape(\"images/hurricane.gif\")\n t.shape(\"images/hurricane.gif\")\n\n return (t, wn, map_bg_img)", "def __init__(self, term: Terminal):\n self.term = 
term\n self.selection_index = 0\n self.draw()\n self.event_loop()", "def __init__(self):\n super().__init__()\n\n # Gadget state\n \n self.isDoorOpen = False\n self.verified = True\n\n # Ev3dev initialization\n self.leds = Leds()\n self.sound = Sound()\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n \n self.ir_sensor = InfraredSensor()\n self.ir_sensor.mode = self.ir_sensor.MODE_IR_REMOTE\n self.color_sensor = ColorSensor()\n self.color_sensor.mode = 'COL-COLOR' # WHITE\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()", "def __init__(self):\n self._pos = Vector2(250, 250)\n self._color = (randint(0, 255), randint(0, 255), randint(0, 255), 255)\n\n self._ticks_alive = 0\n self._dead = False", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._bottle = None\n self._cowboy = None\n self._furnishing = None\n self._has_hazard = False\n self._is_balcony = False\n self._tile_east = None\n self._tile_north = None\n self._tile_south = None\n self._tile_west = None\n self._x = 0\n self._y = 0\n self._young_gun = None", "def __init__(self, BridgeObj, speed=0.005):\n self.speed = speed\n self.BridgeObj = BridgeObj\n print(\"[RainbowAll] Mode Initialized. Speed : \" + str(speed))", "def getturtle(self):\n return self", "def setup(self):\n\n # Create the Sprite lists\n self.sprite_list = arcade.SpriteList()\n\n r = 60\n for x in rand_range(0, 100 * math.pi, scale=math.pi / 5):\n star = arcade.Sprite(\"../../resources/arcade/gold_1.png\")\n star.center_x = SCREEN_WIDTH / 2 + r * math.cos(x)\n star.center_y = SCREEN_HEIGHT / 2 + r * math.sin(x)\n star.seed = scale_generator(x=random() * math.pi, offset=.5, step=.01)\n star.scale = next(star.seed)\n self.sprite_list.append(star)\n r += 3", "def _drawturtle(self):\n screen = self.screen\n shape = screen._shapes[self.Myturtle.shapeIndex]\n ttype = shape._type\n titem = self.Myturtle._item\n if self._shown and screen._updatecounter == 0 and screen._tracing > 0:\n self._hidden_from_screen = False\n tshape = shape._data\n if ttype == \"polygon\":\n if self._resizemode == \"noresize\": w = 1\n elif self._resizemode == \"auto\": w = self._pensize\n else: w =self._outlinewidth\n shape = self._polytrafo(self._getshapepoly(tshape))\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(titem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n screen._drawimage(titem, self._position, tshape)\n elif ttype == \"compound\":\n for item, (poly, fc, oc) in zip(titem, tshape):\n poly = self._polytrafo(self._getshapepoly(poly, True))\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=self._outlinewidth, top=True)\n else:\n if self._hidden_from_screen:\n return\n if ttype == \"polygon\":\n screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n elif ttype == \"image\":\n screen._drawimage(titem, self._position,\n screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n for item in titem:\n screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n self._hidden_from_screen = True", "def __init__(self):\n\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def makeTurtles(num):\n turtles = []\n for i in range(num):\n t = turtle.Turtle()\n #t.speed(0) # can set this for superfast disc movement\n t.up()\n t.shape('square')\n t.shapesize(stretch_len=(2 + i)) #bottom turtle is longest\n t.goto(0, num - i)\n 
turtles.append(t)\n return turtles", "def _spawn_turtle(self, trt_x, trt_y, name=None):\n\n\t\tif name is None or name == \"\":\n\t\t\tname = self._create_unique_turtle_name()\n\t\telif self._has_turtle(name):\n\t\t\treturn \"\"\n\n\t\tturtle = Turtle(name, Point(trt_x, trt_y))\n\t\tself._turtles[name] = turtle\n\n\t\trospy.loginfo(\"New turtle [%s] at x=[%d], y=[%d]\", name, trt_x, trt_y)\n\n\t\treturn name", "def __init__(self):\n #Screen settings:\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (51,153,255)\n\n #Ship settings:\n self.ship_speed_factor = 25\n\n #Bullet settings:\n self.bullet_width = 50\n self.bullet_height = 5\n self.bullet_color = 60,60,60\n self.bullets_allowed = 5\n\n #Target settings:\n self.target_direction = 1 #Works like a flag. 1 represents down, -1 represents up.\n\n #Missed shots allowed\n self.misses_starting_with = 3\n\n #Increases the speed of the target by this factor each time it is hit\n self.speedup_scale = 1.2\n\n #Initializes the dynamic settings\n self.initialize_dynamic_settings()", "def __init__(self, simulator):\r\n self.initialize(simulator)", "def up():\n turtleTmp.penup()", "def __init__(self):\n rospy.init_node('TruckSimNode')\n\n self.steer_angle_topic = rospy.get_param('~steer_angle_topic', \"steer_angle\")\n self.chassis_force_topic = rospy.get_param('~chassis_force_topic', \"chassis_force\")\n\n rospy.Subscriber(\"joy\", Joy, self.joyCb)\n\n self.steer_pub = rospy.Publisher(self.steer_angle_topic, Float64, queue_size=1)\n self.chassis_force_pub = rospy.Publisher(self.chassis_force_topic, Float64, queue_size=1)\n\n # array of joy axes:\n # 0: turn - (+ve = left)\n # 1: acceleration (+ve = increase in current direction)\n # 2: gear\n self.steer = 0\n self.accel = 0\n self.gear = 0\n self.steer_joint = Float64()\n self.chassis_force = Float64()", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.patrol_mode = False\n self.enemy_not_detected = True\n print(\"+++++ self.patrol_mode = {} y self.enemy_not_detected = {}\".format(self.patrol_mode, self.enemy_not_detected))\n self.positionX = 0\n self.positionY = 0\n self.direction = ['forward', 'right', 'backward', 'left']\n self.offset = [0, 1, 0, -1]\n self.index = 0\n self.pointing = self.direction[self.index]\n\n\n # Connect two large motors on output ports B and C\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n self.weapon = MediumMotor(OUTPUT_A)\n self.sound = Sound()\n self.leds = Leds()\n self.ir = InfraredSensor()\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()\n threading.Thread(target=self._proximity_thread, daemon=True).start()", "def __init__(self, width, height):\r\n super().__init__(width, height)\r\n\r\n self.rifle = Rifle()\r\n self.score = 0\r\n\r\n self.bullets = []\r\n\r\n # TODO: Create a list for your targets (similar to the above bullets)\r\n self.targets = []\r\n\r\n arcade.set_background_color(arcade.color.WHITE)", "def initialize(self):\n self.currState = self.startState", "def teleopInit(self):\n self.Drive.resetEncoder()\n\n self.Drive.disableAutoForward()\n self.Drive.disableAutoTurn()\n self.Drive.disableVision()\n\n self.DS.setWhichVariable(True)\n self.Drive.updateSetpoint(\"teleop\")\n self.DS.setFirstTimeVariable(True)\n self.timer.reset()\n\n self.matchTime.startMode(isAuto=False)", "def initialize_graphics(self):\n self.renderer = vtk.vtkRenderer()\n self.window = vtk.vtkRenderWindow()\n self.window.AddRenderer(self.renderer)\n self.renderer.SetBackground(1.0, 1.0, 1.0)\n 
self.window.SetSize(1000, 1000)\n\n # Create a trackball interacter to transoform the geometry using the mouse.\n self.interactor = vtk.vtkRenderWindowInteractor()\n self.interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())\n self.interactor.SetRenderWindow(self.window)\n\n style = ClickInteractorStyle(self)\n self.interactor.SetInteractorStyle(style)\n style.SetCurrentRenderer(self.renderer)", "def initialize(self):\n self.tree = ROOT.TTree('tree', 'tree')\n self.simhitcount = []\n self.simhitarrays = np.array(self.simhitcount, dtype=np.int32)\n self.digitcount = []", "def __init__(self, target_velocity, dt, model_type, robot_type):\n super(StraightEnv, self).__init__(\n target_velocity=target_velocity,\n dt=dt,\n model_type=model_type,\n robot_type=robot_type\n )\n\n # Reward function parameters\n self._lambda1 = 0.25\n\n # State in frame of target straight line. See get_initial_state()\n # function for info.\n self._current_state = np.zeros(6)\n\n # Target line to follow. See get_initial_state() function for info.\n self._target_y = 0\n self._target_yaw = 0\n\n # Initialize nodes and set up ROS topics\n rospy.init_node('rl_planner')\n self.publisher = rospy.Publisher('commands/keyboard',\n ackermann_msgs.msg.AckermannDriveStamped, queue_size=1)\n rospy.Subscriber('ekf_localization/odom', nav_msgs.msg.Odometry,\n self._odometry_callback)\n self._sensor_stamp = rospy.Time.now()\n\n # Wait this number of timesteps before computing another action (this\n # is similar to setting a larger dt during training)\n self._num_states_needed = 3\n self._num_states_received = 0", "def init():\n rino.initialize.initialize()", "def setUp(self):\n self.m=Maze()\n self.m.reset()", "def __init__(self):\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (0, 230, 0)\n\n # Glove Settings\n self.glove_move_speed = 0.25\n self.glove_size = 100\n\n # Ball Settings\n self.ball_move_speed = 0.25\n self.ball_size = 40", "def main():\n tortue_1 = turtle.Turtle()\n tortue_1.shape(\"turtle\")\n tortue_1.color(\"aquamarine4\")\n longueur = 200\n largeur = 200\n nbre_carres = 3\n angle_entre_carres = 15\n for i in range(nbre_carres):\n trace_rectangle(tortue_1, longueur, largeur)\n tortue_1.left(angle_entre_carres * (i + 1))\n\n turtle.exitonclick() # Empêche la fenêtre de se fermer automatiquement à la fin du tracé", "def __init__(self):\n self.action_space = [(0,0)] + list(permutations([i for i in range(m)], 2))\n self.state_space = [(X,T,D) for X in range(m) for T in range(t) for D in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()", "def game_initialise(self):\n super().game_initialise()\n #\n # It might be unnecessary that the terminate lock is a threading.Lock()\n # instance. 
An ordinary property might do just as well, because it only\n # gets set to True.\n self._terminateLock = threading.Lock()\n self._mainLock = threading.Lock()\n self._tickLock = threading.Lock()\n self._tickRaise = None\n self._skippedTicks = 0\n #\n # Reference time for when the game engine was started.\n self._gameInitialisePerf = time.perf_counter()\n self._tickPerf = 0.0", "def __init__(self):\n self._tyrannosaurus = []\n self._triceratops = []", "def turtle_race():\n # Create the turtle screen and two turtles (leave this as the first line).\n screen, artist, writer = turtle_setup()\n\n # Rename the artist turtle and move her to the left, above the x-axis.\n flojo = artist # Flo-Jo, https://en.wikipedia.org/wiki/Florence_Griffith_Joyner\n flojo.shape( \"turtle\" )\n flojo.color( \"blue\" ) # USA!\n flojo.penup()\n flojo.setposition( -WIDTH // 2 + MARGIN, MARGIN * 2 )\n flojo.setheading( 0 )\n flojo.pendown()\n\n # Create a new turtle, below the x-axis, to race against the turtle formerly known as artist.\n usain = turtle.Turtle() # Usain Bolt, https://en.wikipedia.org/wiki/Usain_Bolt\n usain.shape( \"turtle\" )\n usain.color( \"green\" ) # Jamaica\n usain.penup()\n usain.setposition( -WIDTH // 2 + MARGIN, -MARGIN * 2 )\n usain.setheading( 0 )\n usain.pendown()\n\n # TODO 7: Implement the turtle race as described in the lab document.\n writer.write( \"And they're off . . .\", align=\"center\", font=( \"Times\", FONT_SIZE, \"bold\" ) )\n while True:\n flojo.forward( random.randint( MARGIN // 4, MARGIN ) )\n usain.forward( random.randint( MARGIN // 4, MARGIN ) )\n\n # Wait for the user to click before closing the window (leave this as the last line).\n screen.exitonclick()", "def __init__(self, x_coor, x_speed, y_coor, y_speed, direction):\n self.__x_coor = x_coor\n self.__x_speed = x_speed\n self.__y_coor = y_coor\n self.__y_speed = y_speed\n self.__direction = direction\n self.__radius = self.TORPEDO_RADIUS", "def __init__(self):\n self.tape_tag = None\n self.independentVariableShapeList = []\n self.dependentVariableShapeList = []", "def initialise(self):\n # Can take quite a lot of time due to the homing\n print(\"Initialising spectrograph.\")\n err = self._dll.ShamrockInitialize()\n self.status(\"Initialisation\", err)", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def __init__(self):\n\n # initialize window\n self.win = graphics.GraphWin(\"Lunar Lander Game\", 300, 500)\n \n # transform coordinates\n self.win.setCoords(0, -10, 300, 600)\n\n self.surface_polygon = self.create_surface()\n self.surface_polygon.draw(self.win)\n self.background()\n \n\n self.lander_polygon = None\n # Draws two different thrust buttons\n self.b1 = Button(graphics.Point(100, 560), 80, 20, 'Thrust')\n self.b2 = Button(graphics.Point(200, 560), 80, 20, 'No Thrust')\n self.b1.draw(self.win)\n self.b2.draw(self.win)\n \n # Draws text values for altitude, velocity, and fuel\n self.alt_num = graphics.Text(graphics.Point(50, 400), 'Altitude: ')\n self.vel_num = graphics.Text(graphics.Point(50, 450), 'Velocity: ')\n self.fuel_num = graphics.Text(graphics.Point(50, 500), 'Fuel: ')\n self.alt_num.draw(self.win)\n self.vel_num.draw(self.win)\n self.fuel_num.draw(self.win)", "def __init__(self, width, height):\n super().__init__(width, height)\n\n self.ball = Ball()\n self.paddle = Paddle()\n self.score = 0\n\n # These are used to see if the user is\n # holding down the arrow keys\n self.holding_left = 
False\n self.holding_right = False\n\n arcade.set_background_color(arcade.color.WHITE)", "def initialise(self, grid):\n self.total_reward = 0\n\n self.next_state = self.buildState(grid)", "def initialize_state(self):\n # Initialize everything to zero\n self.stateC = self.initializer((self.nSym, 1))\n self.stateC_prev = self.initializer((self.nSym, 1))\n self.state = self.toNeural(self.stateC)\n self.state_prev = self.toNeural(matrix=self.stateC_prev)\n self.inpC = self.initializer((self.nSym, 1))\n self.inpS = self.toNeural(self.inpC)\n\n # Create full traces\n self.create_full_traces()\n\n # Initialize Lotka Volterra\n self.LV_Matrices()\n\n # Allocate Temperature and Lambda\n self.vars['T'] = 0\n self.vars['lambda'] = 0", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def initialize():\n\n global PLAYER # this means we use the global var PLAYER and cannot have a local var named PLAYER\n global LEVEL_COUNTER\n\n LEVEL_COUNTER = 1\n \n coordinates = generate_coords()\n\n PLAYER = Stark()\n tree = Tree()\n ww = WhiteWalker()\n crown = Crown()\n gray_gem = GrayGem()\n clear_board()\n GAME_BOARD.create(\"Snow\",\"Snow\")\n GAME_BOARD.draw_msg(\"Level \" + str(LEVEL_COUNTER) + \". Winter is coming.\")\n generate_level(coordinates, [PLAYER, ww, gray_gem, crown, tree, tree, gray_gem, tree, tree, gray_gem, tree])\n\n # for i in range(0,NUM_ELTS):\n # place_on_board(elts[i], coordinates[i][0], coordinates[i][1])", "def __init__(self):\n # Screen settings\n self.screen_width = 2400\n self.screen_height = 1600\n self.bg_color = (0, 0, 0)\n\n # Raindrop settings\n self.r_y_speed = 10", "def __init__(self, game_size):\n\n self.game_size = game_size\n self.screen_size = 10 * self.game_size, 20 * self.game_size\n self.game_board = GameBoard()\n self.background_color = 55, 55, 40\n self.tetrino_set = dict()\n self.tetrino_id = 1\n self.score = 0" ]
[ "0.81658715", "0.7127116", "0.704404", "0.6922751", "0.6751538", "0.66928184", "0.6515024", "0.64465225", "0.6263604", "0.62292695", "0.6227585", "0.61615515", "0.58694047", "0.58318806", "0.580501", "0.575872", "0.57573175", "0.57200843", "0.5657598", "0.55897486", "0.55519253", "0.55367", "0.5531332", "0.5524546", "0.54946375", "0.54783654", "0.5474411", "0.5465285", "0.54579556", "0.5451918", "0.5443242", "0.54414374", "0.5435695", "0.54300785", "0.5418254", "0.54117614", "0.5409607", "0.5396779", "0.53939545", "0.53897077", "0.53880763", "0.5384566", "0.53774816", "0.536926", "0.5364795", "0.53554094", "0.5352628", "0.5351074", "0.534192", "0.5339102", "0.53327787", "0.5329405", "0.53291845", "0.5327037", "0.53147304", "0.5313368", "0.53100884", "0.53097063", "0.529647", "0.52906066", "0.52855444", "0.5282653", "0.52805537", "0.52614075", "0.52603626", "0.5251999", "0.525147", "0.52491117", "0.52467275", "0.5246514", "0.5244943", "0.5243944", "0.52434766", "0.52415764", "0.52394205", "0.52342343", "0.52341986", "0.5232677", "0.522779", "0.5224894", "0.52227247", "0.52198696", "0.52178717", "0.52098244", "0.5208877", "0.5199129", "0.5197909", "0.51957834", "0.519511", "0.519442", "0.5170354", "0.51654404", "0.51564807", "0.51534474", "0.51520365", "0.5143733", "0.5143733", "0.51416296", "0.51406914", "0.5140521" ]
0.8317453
0
Defines the turtle movement for the initialized turtle instance and executes that movement.
def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed): turtle_name = initialize(turtle_shape, bg_color, turtle_color, turtle_speed) for i in range(36): for i in range(4): turtle_name.forward(200) turtle_name.right(90) turtle_name.right(10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_turtle(self):\n self.forward(self.move_speed)", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def init_turtle():\n turtle.up()\n turtle.home()", "def move():\n Robot.move()", "def movement(self):", "def move(self):\r\n segments = len(self.all_turtles) - 1\r\n for i in range(len(self.all_turtles)):\r\n if segments == 0:\r\n self.all_turtles[segments].forward(MOVE_DISTANCE)\r\n else:\r\n new_x = self.all_turtles[segments - 1].xcor()\r\n new_y = self.all_turtles[segments - 1].ycor()\r\n self.all_turtles[segments].goto(new_x, new_y)\r\n segments -= 1", "def goto(x, y):\n turtleTmp.setposition(x, y)", "def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.commands = commands\n self._pc = 0\n self._loop_stack = []\n self._variables = {'x':0, 'y':0}", "def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()", "def move_turtle(self, x, y):\n tortuga = self.turtle\n if self.capture_mode:\n tortuga.setheading(tortuga.towards(x, y))\n tortuga.setpos(x, y)\n self.add_punto(Punto(x, y))", "def move(self, friction = 0.0):\n try:\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n self.goto(newX, newY)\n # apply friction\n self.dx = self.dx * (1 - friction)\n self.dy = self.dy * (1 - friction)\n except:\n print(\"Error, probably because dx and dy are not properties of the turtle\")", "def test_move_step(self):\n t = AioBaseTurtle()\n t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))\n self.assertAlmostEqual(t._position[0], 100)\n self.assertAlmostEqual(t._position[1], 100)\n t.screen._drawline.assert_called_once_with(\n t.currentLineItem,\n ((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position\n \"black\",\n 1,\n False\n )\n self.mock_update.assert_called_once_with()", "def move(self,x,y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n d = self._turtle.isdown()\n if d:\n self._turtle.penup()\n self._turtle.setposition(x,y)\n if d:\n self._turtle.pendown()", "def moveturtle(x,y,t):\n t.penup()\n t.goto(x,y)\n t.pendown()", "def startMovementAll(self):\n self.startMovementX()\n self.startMovementY()\n self.startMovementZ()", "def setMovement(self, movement):\n self.ma = movement", "def run(self):\n super(MovementControl,self).run()", "def step(self):\r\n\r\n self.velocity = 1\r\n new_pos = self.pos\r\n self.model.space.move_agent(self, new_pos)", "def change_movement(self, action):\r\n if action == \"diagonal\" and self.movement != \"diagonal\":\r\n self.movement = \"diagonal\"\r\n self.x_speed = 3\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_diagonal)\r\n elif action == \"horizontal\" and self.movement != \"horizontal\":\r\n self.movement = \"horizontal\"\r\n self.x_speed = 3\r\n self.y_speed = 0\r\n self.canvas.after(50, self.move_horizontal)\r\n elif action == \"vertical\" and self.movement != \"vertical\":\r\n self.movement = \"vertical\"\r\n self.x_speed = 0\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_vertical)\r\n elif action == \"inward_outward\":\r\n self.movement = \"inward_outward\"\r\n self.canvas.after(50, self.move_inward_outward)", "def begin_auto_moving(self, direction):\n self.direction = direction\n self.image_list = self.animation_dict[direction]\n self.state = 'automoving'\n self.x_vel = self.vector_dict[direction][0]\n self.y_vel = 
self.vector_dict[direction][1]\n self.move_timer = self.current_time", "def start(self):\n self.startAngMovementALl()\n self.startMovementAll()", "def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_instance = turtle.Turtle()\n turtle_instance.shape(turtle_shape)\n turtle.bgcolor(bg_color)\n turtle_instance.color(turtle_color)\n turtle_instance.speed(turtle_speed)\n return turtle_instance", "def run(self):\n # type: () -> None\n self.move_to(self.location)", "def __init__(self, level, treasures, maze_size):\n turtle.Turtle.__init__(self)\n self.shape(\"player_right.gif\")\n self.color(\"blue\")\n self.penup()\n self.pensize(1)\n self.speed(0)\n self.score = 0\n self.level = level\n self.treasures = treasures\n self.maze_size = maze_size\n self.end_writer = writers.EndWriter(maze_size)\n\n turtle.Screen().onkey(self.go_left, \"Left\")\n turtle.Screen().onkey(self.go_right, \"Right\")\n turtle.Screen().onkey(self.go_up, \"Up\")\n turtle.Screen().onkey(self.go_down, \"Down\")\n turtle.Screen().onkey(self.find_path, \"f\")", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def step(self, move):", "def setMovement(self, movement, isSpecial = False, canControl = True):\n\n vel = self.ode_body.getLinearVel()\n for i in range(len(self.direction)):\n vel[i] = self.direction[i] * movement\n\n self.ode_body.setLinearVel(vel)\n\n self.moveVal = self.direction\n self.moveSpecial = isSpecial\n self.isMove = [False, False]\n self.direction = [self.moveVal[0], self.moveVal[1]]\n\n if not canControl:\n self.knockback()\n self.moveLock(None, 9999)\n self.isKnockback = True\n \n # Play Sound\n if movement > 10:\n self.sfx['lunge'].play()", "def movement(self, screen):\n if self.tx is not None and self.ty is not None: # Target is set\n\n X = self.x - self.tx\n Y = self.y - self.ty\n\n if X < 0: # --->\n self.img = pygame.image.load(next(self.walking_east_images))\n self.x += self.velocity\n elif X > 0: # <----\n self.img = pygame.image.load(next(self.walking_west_images))\n self.x -= self.velocity\n if Y > 0: # up\n self.img = pygame.image.load(next(self.walking_north_images))\n self.y -= self.velocity\n elif Y < 0: # dopwn\n self.img = pygame.image.load(next(self.walking_south_images))\n self.y += self.velocity\n screen.blit(self.img, (self.x, self.y))\n\n if X == 0 and Y == 0:\n self.tx, self.ty = None, None\n self.agent.actionCompleted()", "def move(self):\n \n self.position = self.wander()", "def RobotInit():\n names = [\"Body\"]\n angles = [-0.038392066955566406, 0.1349501609802246, 1.1964781284332275, 0.07512402534484863, -1.4926238059997559, -1.3391400575637817, 0.11500811576843262, 0.029999971389770508, -0.25766992568969727, -0.09506607055664062, -0.9694461822509766, 2.086198091506958, -1.168950080871582, 0.07367396354675293, -0.25766992568969727, 0.10128593444824219, -0.9342479705810547, 2.0663399696350098, -1.186300277709961, -0.07205605506896973, -0.309826135635376, 0.24233007431030273, 0.06131792068481445, 0.8544800281524658, 1.5983860492706299, 0.17799997329711914]\n fractionMaxSpeed = 0.1\n time.sleep(1)\n motion.setAngles(names, angles, fractionMaxSpeed)", "def move(self):\n if self.ycor() > 280: self.y_dir = -1 # Set vertical movement to down if ball at top of screen\n if self.xcor() > 380: self.x_dir = -1 # Set horizontal movement to left if ball at right of screen\n if self.xcor() < -380: self.x_dir = 1 # Set horizontal movement to right if ball at left of screen\n new_x = self.xcor() + 
self.x_dir * 2 # Define 2 spaces forward in set horizontal dir of travel\n new_y = self.ycor() + self.y_dir * 2 # Define 2 spaces forward in set vertical dir of travel\n self.goto(new_x, new_y) # Move ball to newly defined position", "def move(): #py:move\n RUR._move_()", "def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer", "def move(self) -> None:\n\n new_x = self.getX() + self.speed[0]\n new_y = self.getY() + self.speed[1]\n self.setX(new_x)\n self.setY(new_y)", "def moving(self):\n if self.sensors['proximity'][0].imminent_collision:\n self.motion.stopMove()\n self.posture.goToPosture('Stand', self.SPEED)\n self.behavior_ = self.BEHAVIORS.thinking\n\n elif not self.motion.moveIsActive():\n # Walker will always keep moving forward,\n # if no obstacles are found.\n move_to = (self.STRIDE, 0, 0)\n self.motion.post.moveTo(*move_to)\n logger.info('Walking: %s', move_to)", "def move(self,dt):\n raise NotImplementedError(\"Robot.move\")", "def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)", "def init():\n turtle.setworldcoordinates(-WINDOW_WIDTH / 2, -WINDOW_WIDTH / 2,\n WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)\n\n turtle.up()\n turtle.setheading(0)\n turtle.title('squares')\n pass", "def move(self, head, steps):\n self.turn(head)\n if self.direction == 0:\n self.x += int(steps)\n if self.direction == 1:\n self.y += int(steps)\n if self.direction == 2:\n self.x -= int(steps)\n if self.direction == 3:\n self.y -= int(steps)", "def main():\n args = _argument_parsing()\n _prepare_turtle()\n _if_else_statement(args)\n turtle.mainloop()", "def test_calc_move(self):\n t = AioBaseTurtle()\n t.speed(speed=5)\n steps, delta = t._calc_move(Vec2D(0, 100))\n self.assertEqual(steps, 20)\n self.assertAlmostEqual(delta[0], 0.0)\n self.assertAlmostEqual(delta[1], 5.0)", "def random_move(turtle, distance):\n angle = uniform(-90,90)\n d = uniform(0,distance)\n turtle.left(angle)\n turtle.forward(d)", "def move(self) -> None:\n\n if self.move_up:\n 
self.__moveUpIfPossible()\n if self.move_down:\n self.__moveDownIfPossible()", "def move(self,x,y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n fstate = self._turtle.fill()\n if fstate: # only need to do this if in mid-fill\n self._turtle.fill(False)\n self._turtle.penup()\n self._turtle.setposition(x,y)\n self._turtle.pendown()\n if fstate: # only need to do this if in mid-fill\n self._turtle.fill(True)", "def move(self) -> None:\n self.delta_time += 1 / 30 # FPS is 30 frames per second\n\n if self.is_active:\n self.y -= self.velocity * self.delta_time + 0.5 * self.gravity * (self.delta_time ** 2) # s = ut + 0.5at^2\n self.velocity = self.velocity + self.gravity * self.delta_time # v = u + at\n\n # Limit the velocity to the terminal velocity\n self.velocity = max(self.terminal_velocity, self.velocity)\n\n # Limit the y-pos to within the top of the screen and the base\n self.y = min(max(0, self.y), BACKGROUND_SPRITE.get_height() - Base.Height - Bird.Height)\n\n # Animation\n # -e^-x graph is found suitable for the slow descent\n # The value of the function converges to -90 as x peaks out at 4.5\n # The value of the function converges to 0 as x becomes negative\n self.angle = -np.exp(self.velocity / self.terminal_velocity * 4.5) + (self.velocity > 0) * self.up_angle\n else:\n self.y = self.init_y + np.sin(self.delta_time * np.pi) * self.glide_height", "def update(self, time_step):\n a = [0,0]\n F = self.force()\n for i in [0,1]: # We have to update x and y\n a[i] = self.force()[i] / self.mass\n self.velocity[i] = self.velocity[i] + a[i]*time_step\n self.position[i] = self.position[i] + self.velocity[i]*time_step # I'm lazy\n self.turtle.goto(self.position) # Comment out the goto if you need the simulation to run really fast; you won't get the animation", "def move(self):\n pass", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def _animate(self):\n steps = (1, 7, 14)\n if self.rect.x < self.start_x - 100:\n self.change_dir = False\n elif self.rect.x > self.start_x + 100:\n self.change_dir = True\n self.direction = -1 if self.change_dir else 1\n self.rect.x += self.direction * choice(steps)", "def executeTrajectory():\n driveStraight(1, 0.6)\n rotate(0.25)\n driveStraight(1, .45)\n rotate(-0.25)", "def move(self, direction, cycles):\n\t\tpass", "def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() * self.drag # apply drag to direction", "def walk(self, dir):\n x, y, theta = dir\n self.motionProxy.moveToward(x, y, theta, [[\"Frequency\", 1]])\n self.isWalking = True", "def _setup_move(self, position):\n self.log.debug(\"%s.setpoint = %s\", self.name, position)\n self.setpoint.put(position, wait=True)\n if self.actuate is not None:\n self.log.debug(\"%s.actuate = %s\", self.name, self.actuate_value)\n self.actuate.put(self.actuate_value, wait=False)", "def run(self):\n while self.direction != \"\":\n if self.direction == \"decrease\":\n if 
self.position > 200:\n self.position -= 15\n elif self.direction == \"increase\":\n if self.position < 800:\n self.position += 15\n if self.direction != \"neutral\":\n self.move_joint(self.position, 900)\n time.sleep(0.1)", "def turtle(self,turtleType):\n if self.turtleType == turtleType:\n return\n if self.turtleType and self.turtleType != PLAYER:\n self.mc.removeEntity(self.turtleId)\n self.turtleType = turtleType\n if turtleType == PLAYER:\n self.turtleId = None\n elif turtleType:\n self.turtleId = self.mc.spawnEntity(turtleType,\n self.position.x,self.position.y,self.position.z,\n \"{NoAI:1}\")\n self.setEntityCommands()\n self.positionOut()\n self.directionOut()", "def move(self, direction):\n pass", "def move(self, direction, speed):\n self.motor_A(direction, speed)\n self.motor_B(direction, speed)", "def move(self, step):\n\n status = self.read()\n Logger.getLogger().debug(\"Status in move method: %s\", status)\n # while the motors are moving we don't want to start another movement\n if status > CurtainsStatus.OPEN or self.motor.value:\n return\n\n self.target = step\n\n # deciding the movement direction\n if self.steps() < self.target:\n self.__open__()\n elif self.steps() > self.target:\n self.__close__()", "def move(self, x, y):\n self.x = x\n self.y = y\n self.call('move', x, y)", "def movement(self, action):\r\n\r\n #if its moving horizontally only can move vertically in the next move\r\n if self.velocities[1] == 0:\r\n if action == 0 :\r\n self.velocities[0] = 0\r\n self.velocities[1] = -1\r\n if action == 1 :\r\n self.velocities[0] = 0\r\n self.velocities[1] = 1\r\n\r\n #if its moving vertically only can move horizontally in the next move\r\n if self.velocities[0] == 0:\r\n if action == 2 :\r\n self.velocities[0] = -1\r\n self.velocities[1] = 0\r\n if action == 3 :\r\n self.velocities[0] = 1\r\n self.velocities[1] = 0\r\n \r\n self.displacement()", "def run(self, **kwargs): # equivalent to step/move\n self.update_game_status()\n self.everyone_move(**kwargs)", "def shapes2():\r\n turtle.up()\r\n turtle.backward(100)\r\n turtle.left(270)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.backward(700)\r\n shapes()", "def main():\n\n\t# Initialising ROS node\n\trospy.init_node(\"turtlebot_move\")\n\n\t# Reading parameters from the launch file\n\tnpy_path = rospy.get_param(\"/publish_velocity/npy_file_path\")\n\n\t# Reading the generated A* path from the .npy file\n\t# rospack = rospkg.RosPack()\n\t# npy_path = os.path.join(rospack.get_path('turtlebot_astar'), 'src/path_dumps/path_final.npy')\n\trobot_path_list = np.load(npy_path, allow_pickle=True)\n\n\tglobal goal\n\tgoal.x, goal.y = robot_path_list[0].getXYCoords()\n\n\t# Creating the Publisher and the Subscriber\n\tpub = rospy.Publisher(\"/cmd_vel\", Twist, queue_size=1)\n\tsub = rospy.Subscriber(\"/odom\", Odometry, newOdom, (robot_path_list, pub))\n\n\tr = rospy.Rate(4)\n\tspeed = Twist()\n\n\ttry:\n\t\twhile not rospy.is_shutdown():\n\n\t\t\tinc_x = goal.x - x\n\t\t\tinc_y = goal.y - y\n\n\t\t\tangle_to_goal = math.atan2(inc_y, inc_x)\n\n\t\t\tif abs(angle_to_goal - theta) < 0.1:\n\t\t\t\tspeed.linear.x = 0.5\n\t\t\t\tspeed.angular.z = 0.0\n\t\t\telif (angle_to_goal - theta) < 0:\n\t\t\t\tspeed.linear.x = 0.0\n\t\t\t\tspeed.angular.z = -0.3\n\t\t\telse:\n\t\t\t\tspeed.linear.x = 0.0\n\t\t\t\tspeed.angular.z = 0.3\n\n\t\t\t# Publishing the Velocity Inputs for the TurtleBot on the topic /cmd_vel\n\t\t\tpub.publish(speed)\n\t\t\tr.sleep()\n\n\texcept rospy.exceptions.ROSInterruptException as 
ros_int:\n\t\tprint(ros_int)\n\texcept Exception as e:\n\t\traise e", "def player_movement(self):", "def begin_moving(self, direction):\n self.direction = direction\n self.image_list = self.animation_dict[direction]\n self.timer = self.current_time\n self.move_timer = self.current_time\n self.state = 'moving'\n\n if self.rect.x % 32 == 0:\n self.y_vel = self.vector_dict[self.direction][1]\n if self.rect.y % 32 == 0:\n self.x_vel = self.vector_dict[self.direction][0]", "def go(self):\n global Moving\n\n if TargetVal > ActualVal:\n Moving = 'bak'\n elif TargetVal < ActualVal:\n Moving = 'fwd'\n\n MoveMotor()", "def move(x,y):\r\n pass", "def main():\n draw_sun()\n draw_pavement()\n draw_building()\n martin.goto(12, 40) # lines 171, 173, and 175 move the turtle down to space out the windows on the building.\n draw_windows()\n martin.goto(12, 0)\n draw_windows()\n martin.goto(12, -40)\n draw_windows()\n draw_door()\n draw_doorknob()", "def move_dart(self):\n global level\n if level == 0:\n self.rect.centerx+=self.delta\n if self.rect.centerx >= 1000: \n self.delta = -1\n elif self.rect.centerx < 500:\n self.delta = 1\n elif level == 1:\n self.rect.centery+=self.delta\n if self.rect.centery <= 150: \n self.delta = 2\n elif self.rect.centery > 650:\n self.delta = -2\n elif level == 2:\n self.rect.centerx+=self.delta #To make changes in both x and y direction\n self.rect.centery+=self.delta\n if self.rect.centerx < 100 or self.rect.centery <= 100: \n self.delta = random.randint(1,10) #adds random speeds to the motion\n elif self.rect.centerx >= 900 or self.rect.centery > 700:\n self.delta = -random.randint(1,10)", "def move(self):\n \n self.position = self.explore()", "def move(self, *step):\n self.x += step[0]\n self.y += step[1]", "def set_make_move(function: Callable) -> None:\n main.make_move = function", "def move(self, twist: Optional[Twist] = None):\n if twist is None:\n left = right = 0\n self.navigation_goal = None\n else:\n linear = np.clip(twist.linear.x, -1, 1)\n angular = np.clip(twist.angular.x, -1, 1)\n left, right = (linear - angular) / 2, (linear + angular) / 2\n # # always give a robot the full velocity at least on one side\n # if (greater := max(abs(left), abs(right))) > 0:\n # left, right = left / greater, right / greater\n\n self.locomotion_lock.acquire()\n self.v_left = SPEEDUP * left\n self.v_right = SPEEDUP * right\n self.locomotion_lock.release()", "def moving(self, moving):\n\n self._moving = moving", "def move_start_node(self, x, y):", "def _move(self, dx, dy):\n pass # must override in subclass", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... 
\",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def main():\n tortue_1 = turtle.Turtle()\n tortue_1.shape(\"turtle\")\n tortue_1.color(\"aquamarine4\")\n longueur = 200\n largeur = 200\n nbre_carres = 3\n angle_entre_carres = 15\n for i in range(nbre_carres):\n trace_rectangle(tortue_1, longueur, largeur)\n tortue_1.left(angle_entre_carres * (i + 1))\n\n turtle.exitonclick() # Empêche la fenêtre de se fermer automatiquement à la fin du tracé", "def move(self, coordinates, direction):\n pass", "def _setup_move(self, position):\n\n def done_moving(**kwargs):\n self.log.debug(\"%s async motion done\", self.name)\n self._done_moving(success=True)\n\n if self.done is None:\n # No done signal, so we rely on put completion\n moving_val = 1 - self.done_value\n self._move_changed(value=moving_val)\n\n self.log.debug(\"%s.setpoint = %s\", self.name, position)\n\n if self.actuate is not None:\n self.setpoint.put(position, wait=True)\n\n self.log.debug(\"%s.actuate = %s\", self.name, self.actuate_value)\n self.actuate.put(self.actuate_value, wait=False, callback=done_moving)\n else:\n self.setpoint.put(position, wait=False, callback=done_moving)", "def move(self):\n self.position += self.speed", "def __init__(self, pos=(0, 0)):\n super().__init__() # Call 'turtle' initiation\n self.penup() # Stop displaying trail\n self.shapesize(stretch_wid=1, stretch_len=1) # Set dimensions of ball object to same height and width\n self.color(\"white\") # Set colour to white\n self.shape(\"circle\") # Set ball shape to round\n self.setpos(pos) # Move ball to desired position on screen\n self.x_dir = 1 # Set ball horizontal movement to right\n self.y_dir = 1 # Set ball vertical movement to up", "def move(self):\n self.x += math.sin(self.angle) * self.speed\n self.y -= math.cos(self.angle) * self.speed\n # Next, account for gravity\n (self.angle, self.speed) = addVectors((self.angle, self.speed), gravity)\n # Then, friction / drag\n self.speed *= drag", "def move(self):\n # This block is used to move into the living room.\n if self.room_name == \"living\" or self.room_name == \"Living\":\n self.living_room()\n # This block is used to open the door.\n elif self.room_name == \"Door\" or self.room_name == \"door\":\n self.door()\n # This block is used to move into the kitchen.\n elif self.room_name == \"Kitchen\" or self.room_name == \"kitchen\":\n self.kitchen()\n # This block is used to move into the hallway.\n elif self.room_name == \"Hallway\" or self.room_name == \"hallway\":\n self.hallway()\n # This block is used to move into the bathroom.\n elif self.room_name == \"Bathroom\" or self.room_name == \"bathroom\":\n self.bathroom()\n # This block is used to move into the basement.\n elif self.room_name == \"Basement\" or self.room_name == \"basement\":\n self.basement()\n # This block is used to move into the bedroom.\n elif self.room_name == \"Bedroom\" or self.room_name == \"bedroom\":\n self.bedroom()\n # This block is used to move into the attic.\n elif self.room_name == \"Attic\" or \"attic\":\n self.attic()", "def _set_action(self, action):\n\n rospy.logdebug(\"Start Set Action ==>\"+str(action))\n # We convert the actions to speed movements to send to the parent class of Parrot\n linear_speed_vector = Vector3()\n angular_speed = 0.0\n\n if action == 0: # FORWARDS\n linear_speed_vector.x = self.linear_forward_speed\n self.last_action = \"FORWARDS\"\n elif action == 1: # BACKWARDS\n 
linear_speed_vector.x = -1*self.linear_forward_speed\n self.last_action = \"BACKWARDS\"\n elif action == 2: # STRAFE_LEFT\n linear_speed_vector.y = self.linear_forward_speed\n self.last_action = \"STRAFE_LEFT\"\n elif action == 3: # STRAFE_RIGHT\n linear_speed_vector.y = -1*self.linear_forward_speed\n self.last_action = \"STRAFE_RIGHT\"\n elif action == 4: # UP\n linear_speed_vector.z = self.linear_forward_speed\n self.last_action = \"UP\"\n elif action == 5: # DOWN\n linear_speed_vector.z = -1*self.linear_forward_speed\n self.last_action = \"DOWN\"\n\n # We tell drone the linear and angular speed to set to execute\n self.move_base(linear_speed_vector,\n angular_speed,\n epsilon=0.05,\n update_rate=10)\n\n rospy.logdebug(\"END Set Action ==>\"+str(action))", "def __init__(self, target_velocity, dt, model_type, robot_type):\n super(StraightEnv, self).__init__(\n target_velocity=target_velocity,\n dt=dt,\n model_type=model_type,\n robot_type=robot_type\n )\n\n # Reward function parameters\n self._lambda1 = 0.25\n\n # State in frame of target straight line. See get_initial_state()\n # function for info.\n self._current_state = np.zeros(6)\n\n # Target line to follow. See get_initial_state() function for info.\n self._target_y = 0\n self._target_yaw = 0\n\n # Initialize nodes and set up ROS topics\n rospy.init_node('rl_planner')\n self.publisher = rospy.Publisher('commands/keyboard',\n ackermann_msgs.msg.AckermannDriveStamped, queue_size=1)\n rospy.Subscriber('ekf_localization/odom', nav_msgs.msg.Odometry,\n self._odometry_callback)\n self._sensor_stamp = rospy.Time.now()\n\n # Wait this number of timesteps before computing another action (this\n # is similar to setting a larger dt during training)\n self._num_states_needed = 3\n self._num_states_received = 0", "def simulation_step(self):\n if not self.np_trajectory.size:\n #No trajectory to go to.....\n return\n closest_ind = self.find_closest_trajectory_pose()\n ref_ind = (closest_ind + 30) # closest_ind + numpy.round(self.v / 4)\n traj_len = len(self.np_trajectory[0])\n if self.loop is True:\n ref_ind = ref_ind % traj_len\n else:\n if ref_ind > traj_len-1:\n ref_ind = traj_len-1\n if closest_ind == traj_len-1:\n self.at_dest = True\n else:\n ref_ind = closest_ind\n ref_state = self.np_trajectory[:, int(ref_ind)]\n\n # update vehicle state.\n '''if self.class_name == 'TruckVehicle':\n self.update_vehicle_state_qualisys()\n self.UDP_receive()\n if self.data == \"-1.00\":\n self.set_control_commands_pp(ref_state, ref_ind)\n else:\n steer = int(self.data[-6:-3])\n throttle = int(self.data[:-6]) + 5\n hw_port.set_command(throttle,steer,2)\n self.update_truck_hardware()\n else:\n self.set_control_commands(ref_state)\n self.update_vehicle_state()'''\n\n self.set_control_commands(ref_state, ref_ind)\n self.update_vehicle_state()\n\n # publish vehicle state.\n vehicle_state = msgs.VehicleState(self.vehicle_id, self.class_name,\n self.x, self.y, self.yaw, self.v)\n self.pub_state.publish(vehicle_state)\n self.update_current_node()\n\n #The way that the stop light waiting works, this is necessary\n if not self.waiting_at_stop:\n self.check_for_traffic_light()\n self.get_traffic()", "def move(self):\n possible_steps = self.model.grid.get_neighborhood(\n self.pos,\n moore=False, # implements Von Neumann neighborhood\n include_center=False)\n new_position = self.random.choice(possible_steps)\n self.heading = [new_position[0] - self.pos[0],\n new_position[1] - self.pos[1]]\n self.model.grid.move_agent(self, new_position)", "def move(self, t, s):\n raise 
NotImplementedError", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0", "def _go(self, distance):\n ende = self._position + self._orient * distance\n self._goto(ende)", "def _move_tetrino(self, tetrino, x, y):\n tetrino.location_offset[constant.X] += x\n tetrino.location_offset[constant.Y] += y\n tetrino.update_location()", "def cool_turtle():\n # Make the TurtleWindow.\n window = rg.TurtleWindow()\n\n # Make the SimpleTurtle.\n cool_turtle = rg.SimpleTurtle('turtle')\n cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too\n cool_turtle.speed = 1 # Slow\n\n # Move the SimpleTurtle to her starting position.\n start_at = rg.Point(100, -50)\n cool_turtle.pen_up()\n cool_turtle.go_to(start_at)\n cool_turtle.pen_down()\n\n # Set up some parameters that control the nature of the shape drawn.\n size = 100 # Try 150 too\n angle = 1 # Try 20 too\n iterations = 360 # Try 90 too\n\n # Store the animation speed (to reset it later).\n tracer_n, tracer_d = window.tracer(), window.delay()\n\n # Make the animation go much faster.\n # First number: bigger means faster.\n # Second number: bigger means slower.\n window.tracer(5, 5)\n\n for _ in range(iterations):\n cool_turtle.right(angle)\n cool_turtle.draw_square(size)\n\n # Reset the animation to its original speed.\n window.tracer(tracer_n, tracer_d)\n\n window.close_on_mouse_click()", "def move(self, speed=1):\n self.set_motor(self.left_motor, 'left', speed)\n self.set_motor(self.right_motor, 'right', speed)\n time.sleep(0.5)", "def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n self.position = next_move", "def move_tower(self, x, y):\n self.x = x\n self.y = y\n self.menu.x = x\n self.menu.y = y\n self.menu.update()", "def walk(self):\n self.speed = self.speed + (0.2 * self.legs)" ]
[ "0.7389288", "0.6786604", "0.6750962", "0.6614437", "0.6599587", "0.64865434", "0.6407805", "0.64050645", "0.6318544", "0.6280543", "0.6203746", "0.61156297", "0.6057648", "0.6023432", "0.60230684", "0.600326", "0.59755194", "0.5961362", "0.59582186", "0.59399325", "0.5905899", "0.58884126", "0.58734035", "0.58535314", "0.58469945", "0.58469945", "0.5843393", "0.58384305", "0.5828846", "0.5822152", "0.5811579", "0.5808796", "0.57854784", "0.57837", "0.57816696", "0.577448", "0.57706434", "0.57689637", "0.57512456", "0.57480365", "0.57448584", "0.57431066", "0.57225674", "0.56975085", "0.5692034", "0.56915367", "0.5684571", "0.56774664", "0.5666643", "0.5666643", "0.56526345", "0.5644061", "0.5639617", "0.56376857", "0.563651", "0.56282395", "0.56247705", "0.5616173", "0.56139225", "0.5611572", "0.5600322", "0.55920553", "0.5579841", "0.5578875", "0.5575689", "0.5575178", "0.55724055", "0.55719763", "0.5563371", "0.556002", "0.55567926", "0.55520695", "0.553951", "0.5528591", "0.5525976", "0.55219334", "0.55193704", "0.5518345", "0.55087394", "0.5508433", "0.55027086", "0.5489268", "0.5487127", "0.5482071", "0.5478185", "0.5472774", "0.5469942", "0.54683965", "0.5467946", "0.54466444", "0.5446459", "0.54451275", "0.54252064", "0.54216427", "0.54210144", "0.54190546", "0.5413922", "0.54127157", "0.5399348", "0.5398032" ]
0.69048256
1
Add this command to the main parser.
def add_commands(parser, subparsers): subparser = subparsers.add_parser('libraries', help='search for LogicBlox libraries') subparser.set_defaults(func=execute_libraries) subparser.add_argument('libraries', nargs='*', help="libraries to locate") subparser.add_argument('--libpath', help="library path to search") subparser.add_argument('--dependencies', '-d', default=False, action='store_true', help="print the libraries upon which a library depends") subparser.add_argument('--quiet', '-q', default=False, action='store_true', help="do not display any information. Used when simply querying the exit code") subparser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extend_cli(self, subparser):", "def add_args(self, parser):", "def add(self, name, command):", "def _add_to_cli(self, parser, group=None):\n dest = self.dest\n if group is not None:\n dest = group.name + '_' + dest\n\n subparsers = parser.add_subparsers(dest=dest,\n title=self.title,\n description=self.description,\n help=self.help)\n # NOTE(jd) Set explicitly to True for Python 3\n # See http://bugs.python.org/issue9253 for context\n subparsers.required = True\n\n if self.handler is not None:\n self.handler(subparsers)", "def add_parse_arguments(self, parser):\n parser.add_argument('command', help='The daemon command: start|status|stop|restart')\n parser.add_argument('--pid_file', help='The pid_file of the daemon')", "def add_arguments(self, parser):\n\n cmd = self # make sure we can use sub parser in django. via stack_overflow\n\n class SubParser(CommandParser):\n \"\"\"Use to avoid the error when using sub parser in django's add_arguments method.\"\"\"\n def __init__(self, **kwargs):\n super(SubParser, self).__init__(cmd, **kwargs)\n\n # add custom sub commands.\n\n subparsers = parser.add_subparsers(\n title=\"sub commands\",\n parser_class=SubParser,\n dest='sub_command',\n help='Sub commands you can use.'\n )\n\n # actions to start or stop socket server.\n\n server = subparsers.add_parser('server', help=\"Server Commands\")\n server.add_argument(\n 'action',\n metavar='ACTION',\n choices=self.socket_server_actions,\n help='Actions is: <%s>' % '|'.join(self.socket_server_actions),\n )\n\n # actions of targets when calling server is running.\n\n proxy = subparsers.add_parser('proxy', help=\"Proxy Commands\")\n proxy.add_argument(\n '-a', '--action',\n metavar='ACTION',\n required=True,\n choices=self.proxy_job_actions,\n help='Actions is: <%s>' % '|'.join(self.proxy_job_actions)\n )\n proxy.add_argument(\n '-t', '--targets',\n metavar='TARGET',\n nargs='*',\n help='Targets can be empty which means ALL, you can list targets by <./manage.py mirrordata proxy -a ping>.'\n )", "def add(cls, subparsers):\n subparser = subparsers.add_parser(\n name=cls.__tool_name__(),\n description=cls.__get_description__())\n\n cls.__add_arguments__(subparser)\n subparser.set_defaults(func=cls.from_args)\n return subparser", "def create_parser(self, prog_name, subcommand):\r\n self.prog_name = \"{} {}\".format(prog_name, subcommand)\r\n return super(TrackedCommand, self).create_parser(prog_name, subcommand)", "def add_arguments(self, parser):", "def __add_arguments__(cls, parser):", "def create_parser(self, prog_name, subcommand):\r\n # Hack __main__ so --help in dev_appserver_main works OK.\r\n sys.modules['__main__'] = dev_appserver_main\r\n return super(Command, self).create_parser(prog_name, subcommand)", "def addOption(self, parser):\n pass", "def additional_command(self):\n pass", "def custom(self, command):\n self.command.append(command)\n return self", "def _add_to_cli(self, parser, group=None):\n container = self._get_argparse_container(parser, group)\n kwargs = self._get_argparse_kwargs(group)\n prefix = self._get_argparse_prefix('', group.name if group else None)\n deprecated_names = []\n for opt in self.deprecated_opts:\n deprecated_name = self._get_deprecated_cli_name(opt.name,\n opt.group)\n if deprecated_name is not None:\n deprecated_names.append(deprecated_name)\n self._add_to_argparse(parser, container, self.name, self.short,\n kwargs, prefix,\n self.positional, deprecated_names)", "def add_command(self, command):\n self.command.extend(command)", "def 
add_command_line_arguments(self, parser):\n # parser.add_option(...)\n pass", "def add_cli(self, subparser):\n new_parser = subparser.add_parser('create', help='create new scratch file')\n new_parser.add_argument('name', nargs='?', default=None, help=\"Optional Name to be given to the file, \"\n \"default name is an increment of 'scratch##'\")\n new_parser.set_defaults(func=self.action)\n return subparser", "def add_cmd(self, name: str, help_str: str, cmd_fn: typing.Callable, arg: str = None, arg_help: str = None):\n self.cmd_names.append(name)\n cmd = self.cli_subparsers.add_parser(name, help=help_str)\n cmd.set_defaults(func=cmd_fn)\n if arg is not None:\n cmd.add_argument(arg, help=arg_help)", "def addSubParser( parentParser, cmdName ) :\n parser = parentParser.add_parser( cmdName, help='Generate a new UBOS package scaffold.' )\n parser.add_argument( '--directory', required=True, help='Directory where to create the package scaffold')\n parser.add_argument( '--template', required=True, help='Name of the template to use' )\n parser.add_argument( '--json', required=False, help='Settings file' )", "def add_command(self, name, desc, func=None):\n assert type(name) == str\n assert type(desc) == str\n if func is not None:\n assert callable(func)\n\n def wrap_argparse(parser, args, func):\n \"\"\"Convenience function calls argparse with list of args and calls func with them\"\"\"\n pargs = parser.parse_args(args)\n return func(**vars(pargs))\n\n assert name not in self.cmd2func, \"Command with same name already defined on this level!\"\n\n self.cmd_list.append((name, desc))\n if func is None:\n m = necapy(name=name, desc=desc)\n self.cmd2func[name] = m.parse\n return m\n else:\n ap = argparse.ArgumentParser(description=desc)\n self.cmd2func[name] = lambda args: wrap_argparse(ap, args, func)\n return ap", "def get_parser(self, prog_name):\n parser = super(AbstractCommand, self).get_parser(prog_name)\n parser.add_argument(\n '--log-file', help='record output to FILE'\n )\n return self.extend_parser(parser)", "def add_command(self, name, cmd):\n if (\n not isinstance(cmd, types.FunctionType) and\n not issubclass(cmd, AbstractCommand)\n ):\n print_failure(\"{}-Command must inherit from AbstractCommand!\".format(name), 1)\n\n # setup command\n cmd = cmd() # type: AbstractCommand\n command = self._subparsers.add_parser(\n name,\n help=cmd.help,\n description=colored(cmd.description, 'yellow'),\n formatter_class=ColoredHelpFormatter,\n add_help=False\n )\n command.add_argument(\n '-h', '--help',\n action='help',\n default=argparse.SUPPRESS,\n help='Show this help message and exit.'\n )\n command.titles('Arguments', 'Options', color='cyan')\n\n # Add arguments and bind command\n for arg, opt in cmd.arguments.items():\n command.add_argument(arg, **opt)\n command.set_defaults(func=cmd.handle)\n self.commands[name] = command", "def setup_parser(self, parser, args):\r\n\r\n pass", "def add_arguments(self, parser):\n pass", "def add_arguments(self, parser):\n pass", "def add_arguments(parser):\n return", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--pythonpath',\n help='A directory to add to the Python path',\n )\n self.add_arguments(parser)\n return parser", "def setup_parser(self, parser):", "def build_parser(self, parser: ArgumentParser) -> None:", "def addArguments(self, parser):\r\n self.argparseHelper.addArg(parser)", "def 
at_cmdset_creation(self):\n self.add(Command())", "def cli_add_command(parent_parser):\n parser = parent_parser.add_parser(\"setup\", description=cli_add_command.__doc__)\n\n def run_it(_):\n from .util import print_important_env\n print_important_env()\n\n log.info(\"AKRR Setup\")\n\n _config_setup()\n setup()\n\n parser.set_defaults(func=run_it)", "def add_additional_args(cls, parser: argparse.ArgumentParser):\n pass", "def add_subcommand(self, command):\n\n if self.subcommand_parser is None:\n self.subcommand_parser = self.parser.add_subparsers(\n dest='command', help='Please select one command mode below',\n title='Command modes'\n )\n self.subcommands = {}\n\n if not isinstance(command, ScriptCommand):\n raise ScriptError('Subcommand must be a ScriptCommand instance')\n\n parser = self.subcommand_parser.add_parser(\n command.name,\n help=command.short_description,\n description=command.description,\n epilog=command.epilog,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n self.subcommands[command.name] = command\n command.script = self\n\n if callable(getattr(command, '__register_arguments__', None)):\n command.__register_arguments__(parser)\n\n return parser", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of the charm or bundle\")", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of the charm or bundle\")", "def add_command(subparsers):\n\n parser = subparsers.add_parser('create', help=create.__doc__)\n\n parser.add_argument('-r', '--recreate', action='store_true', help='If set, I\\'ll first erase the current database')\n parser.add_argument('-v', '--verbose', action='count', help='Increase verbosity?')\n parser.add_argument('-d', '--image-dir', default='/idiap/project/hface/databases/polimetric_thermal_database/Registered/', help=\"Change the relative path to the directory containing the images of the Polarimetric database.\")\n\n parser.set_defaults(func=create) #action", "def build_parser(self, add_help=True):\n self.parser = argparse.ArgumentParser(\n description=self.description, add_help=add_help\n )\n self.parser.prog = f\"python -m {self.package}.{self.module_name}\"\n self.parser.add_argument(\n \"config_file\", help=\"Path/name of YAML configuration file for NEMO nowcast.\"\n )", "def add_subcommand(self, cmd):\n self.subcommands.append(cmd)", "def add_arguments(self):\n super().add_arguments()\n self.parser.add_argument(\n \"sql_command\",\n help=\"The SQL commmand to execute. Use <odb> to reference the filename.\",\n type=str\n )", "def parse(self, commands):\n raise NotImplementedError()", "def add_arguments(cls, arg_parser: ArgParser) -> None:", "def add_subcommand(\n subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs\n ) -> \"CommandParser\":\n desc_help = {\"description\": description, \"help\": description}\n return subp.add_parser(name, **desc_help, **kwargs)", "def parse(self, args):\n pass", "def add_args(parser: argparse.ArgumentParser):\n pass", "def add_cmd(self, cmd, name=\"\"):\n if cmd:\n self.cmds.add((cmd, name))", "def add_options(self, parser):\n pass", "def add_options(cls, parser):\n pass", "def register_command(subparser):\n update_parser = subparser.add_parser('update', help='Run the log files through an updater. 
Used to update '\n 'between versions of autology')\n update_parser.set_defaults(func=_main)\n\n # Arguments\n update_parser.add_argument('-f', '--files', help='Update the files that are currently defined in the log '\n 'directories', action='store_true')\n update_parser.add_argument('-t', '--templates', help='Install a new output template', action='store_true')\n update_parser.add_argument('-T', '--template-definition', help='Define a template definition to install',\n default=template_utilities.DEFAULT_TEMPLATES_URL)", "def add_arguments(self, sub_parser):\n sp = sub_parser", "def cmd(self, command):\n self._commands.append(command)", "def __init__(self, cmd):\n # Build command + options \n self.cmd = cmd \n setattr(self, 'command', \"%s\" % (cmd))", "def add_command(self, cmd: Command):\n self._command_list.append(cmd)", "def declare_opts(self):\n self.parser = ArgumentParser(description=DESCRIPTION,\n prog=self.info.get('prog'),\n epilog='Happy Listening',\n formatter_class=RawDescriptionHelpFormatter,\n )\n self.parser.add_argument('--version', action='version',\n version='%(prog)s {version}'.format(**self.info))\n # Add all options declare in OPTS\n for opt in OPTS:\n opt_names = opt.pop('sw')\n self.parser.add_argument(*opt_names, **opt)\n # Add sub commands\n spa = self.parser.add_subparsers(\n title=f'{self.info[\"prog\"]} commands as positional arguments',\n description=f\"\"\"Use them after optionnal arguments.\\n\"{self.info[\"prog\"]} command -h\" for more info.\"\"\",\n metavar='', dest='command')\n for cmd in CMDS:\n helpmsg = cmd.pop('help')\n cmd, args = cmd.popitem()\n _ = spa.add_parser(cmd, description=helpmsg, help=helpmsg)\n for arg in args:\n name = arg.pop('name', None)\n if name:\n _.add_argument(name, **arg)", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--version', action='version', version=self.get_version())\n\n self.add_arguments(parser)\n return parser", "def init_parser(subparsers):\n parser = subparsers.add_parser(COMMAND, help=\"Add a new task to the task list\")\n parser.add_argument(\"title\", type=str, help=\"The title of the new task\")\n parser.add_argument(\"description\", type=str, help=\"The description of the new task\")\n doto.cli.cmd.task.init_task_flags(parser)", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name to register in Charmhub\")", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name to register in Charmhub\")", "def add_extra_arguments(self, parser):\n pass", "def add_argparse_subparser(subparsers):\n\n new_sub_parser = subparsers.add_parser(\n PluginManager.argparse_subparser_name(), help=\"plugin commands\"\n )\n PluginManager.__argparse_subparser = new_sub_parser\n plugin_subparsers = new_sub_parser.add_subparsers(\n dest=PluginManager.__root_subparser_name\n )\n\n sub_sub_parser = plugin_subparsers.add_parser(\n \"list\", help=\"list the available plugins\"\n )\n sub_sub_parser.add_argument(\n \"--all\",\n dest=\"show_all\",\n action=\"store_true\",\n default=False,\n help=\"show all loaded plugins (default is False)\",\n )\n sub_sub_parser.add_argument(\n dest=\"list_filter\",\n default=None,\n help=\"filter\",\n nargs=\"?\",\n type=PluginManager.__list_filter_type,\n )\n sub_sub_parser = plugin_subparsers.add_parser(\n \"info\", help=\"information on a specific plugin\"\n )\n sub_sub_parser.add_argument(\n 
dest=\"info_filter\",\n default=None,\n type=PluginManager.__info_filter_type,\n help=\"an id\",\n )", "def update_argparser(self, parser):\n pass", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of the library file (e.g. 'db')\")", "def add_argument(self, parser):\n parser.add_argument(*self.args, **self.kwargs)", "def add_command(self, command_info):\n self.commands[command_info.name] = command_info", "def makecmd(self, options):", "def _add_to_cli(self, parser, group=None):\n super(BoolOpt, self)._add_to_cli(parser, group)\n self._add_inverse_to_argparse(parser, group)", "def subparser(cls, parent):\n parser = parent.add_parser('attach', help='attach to container')\n parser.add_argument(\n '--image',\n help='image to instantiate and attach to',\n )\n parser.add_argument(\n 'command',\n nargs='*',\n help='image to instantiate and attach to',\n )\n parser.set_defaults(class_=cls, method='attach')", "def add_arg_parser(subparsers):\n # add\n add_p = subparsers.add_parser('add', description='Create a bundle from a .csv, .tsv, or a directory of files.')\n add_p.add_argument('-t', '--tag', nargs=1, type=str, action='append',\n help=\"Set one or more tags: 'dsdt add -t authoritative:True -t version:0.7.1'\")\n add_p.add_argument('bundle', type=str, help='The destination bundle in the current context')\n add_p.add_argument('path_name', type=str, help='File or directory of files to add to the bundle', action='store')\n add_p.set_defaults(func=lambda args: _add(args))", "def add_args_to_subparser(the_parser, subcommand_name):\n\n the_parser.add_argument(CmdArgs.verbose_optional, help=CmdArgs.verbose_help,\n action='store_true',\n )\n\n if subcommand_name in DCA_VISUALIZATION_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.pdb_chain_id, help=CmdArgs.pdb_chain_id_help)\n the_parser.add_argument(CmdArgs.pdb_file, help=CmdArgs.pdb_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.dca_file, help=CmdArgs.dca_file_help)\n the_parser.add_argument(CmdArgs.rna_secstruct_file_optional,\n help=CmdArgs.rna_secstruct_file_help,\n )\n the_parser.add_argument(CmdArgs.linear_dist_optional,\n help=CmdArgs.linear_dist_help, type = int,\n )\n the_parser.add_argument(CmdArgs.contact_dist_optional,\n help=CmdArgs.contact_dist_help, type = float,\n )\n the_parser.add_argument(CmdArgs.num_dca_contacts_optional,\n help = CmdArgs.num_dca_contacts_help, type = int,\n )\n the_parser.add_argument(CmdArgs.wc_neighbor_dist_optional, type= int,\n help = CmdArgs.wc_neighbor_dist_help,\n )\n the_parser.add_argument(CmdArgs.pdb_id_optional, help = CmdArgs.pdb_id_help)\n\n if subcommand_name in FILE_CONTENT_SUBCOMMANDS:\n if subcommand_name == 'pdb_content':\n the_parser.add_argument(CmdArgs.pdb_file, help = CmdArgs.pdb_file_help)\n if subcommand_name in MSA_TRIMMING_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.max_gap_optional,\n type = float, help = CmdArgs.max_gap_help,\n )\n if subcommand_name == 'trim_by_refseq':\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.msa_file, help=CmdArgs.msa_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.remove_all_gaps_optional,\n help= CmdArgs.remove_all_gaps_help, action='store_true',\n )\n if subcommand_name == 'trim_by_gap_size':\n the_parser.add_argument(CmdArgs.msa_file, 
help=CmdArgs.msa_file_help)\n return None", "def setup_parser(cls, option_group, args, mkflag):", "def setup_parser(cls, option_group, args, mkflag):", "def parser(self, prog, **kwargs):\n prog += ' ' + self.name\n parser = argparse.ArgumentParser(prog=prog,\n description=self.summary,\n **kwargs)\n for args, kwargs in self.args or ():\n parser.add_argument(*args, **kwargs)\n return parser", "def add_step_args(cls, parser):", "def add_command(self, name, fct):\r\n self.cmds[name] = fct", "def add(self, command): # type: (BaseCommand) -> Application\r\n self.add_command(command.config)\r\n command.set_application(self)\r\n\r\n return self", "def add_arguments(self, parser):\n super(Command, self).add_arguments(parser)\n parser.add_argument(\n \"--skip-download\",\n action=\"store_false\",\n dest=\"download\",\n default=True,\n help=\"Skip downloading of the ZIP archive\"\n )\n parser.add_argument(\n \"--skip-clean\",\n action=\"store_false\",\n dest=\"clean\",\n default=True,\n help=\"Skip cleaning up the raw data files\"\n )\n parser.add_argument(\n \"--skip-load\",\n action=\"store_false\",\n dest=\"load\",\n default=True,\n help=\"Skip loading up the raw data files\"\n )\n parser.add_argument(\n \"--keep-files\",\n action=\"store_true\",\n dest=\"keep_files\",\n default=False,\n help=\"Keep zip, unzipped, TSV and CSV files\"\n )\n parser.add_argument(\n \"--no-archive\",\n action=\"store_true\",\n dest=\"no_archive\",\n default=False,\n help=\"Store an archive the downloaded zip file on the version model\"\n )\n parser.add_argument(\n \"--noinput\",\n action=\"store_true\",\n dest=\"noinput\",\n default=False,\n help=\"Download the ZIP archive without asking permission\"\n )\n parser.add_argument(\n \"--test\",\n \"--use-test-data\",\n action=\"store_true\",\n dest=\"test_data\",\n default=False,\n help=\"Use sampled test data (skips download, clean a load)\"\n )\n parser.add_argument(\n \"-a\",\n \"--app-name\",\n dest=\"app_name\",\n default=\"calaccess_raw\",\n help=\"Name of Django app with models into which data will \"\n \"be imported (if other not calaccess_raw)\"\n )", "def add_options(self, parser):\n parser.add_argument(\n '--name',\n required=True,\n help='The human-readable name for the extension. This is '\n 'required.')\n parser.add_argument(\n '--class-name',\n default=None,\n help='The class name for the extension (generally in CamelCase '\n 'form, without spaces). If not provided, this will be '\n 'based on the extension name.')\n parser.add_argument(\n '--package-name',\n default=None,\n help='The name of the package (using alphanumeric ). '\n 'If not provided, this will be based on the extension '\n 'name.')\n parser.add_argument(\n '--package-version',\n default='1.0',\n help='The version for your extension and package.')\n parser.add_argument(\n '--summary',\n default=None,\n help='A one-line summary of the extension.')\n parser.add_argument(\n '--description',\n default=None,\n help='A short description of the extension.')\n parser.add_argument(\n '--author-name',\n default=None,\n help='The name of the author for the package and extension '\n 'metadata. 
This can be a company name.')\n parser.add_argument(\n '--author-email',\n default=None,\n help='The e-mail address of the author for the package and '\n 'extension metadata.')\n parser.add_argument(\n '--enable-configuration',\n action='store_true',\n default=False,\n help='Whether to enable a Configure button and view for the '\n 'extension.')\n parser.add_argument(\n '--enable-static-media',\n action='store_true',\n default=False,\n help='Whether to enable static media files for the package.')", "def add_args(cls, parser):\n dc = getattr(cls, \"__dataclass\", None)\n if dc is not None:\n # do not set defaults so that settings defaults from various architectures still works\n gen_parser_from_dataclass(parser, dc(), delete_default=True)", "def add_command(self, name, command_class, ns=None):\n ep = EntryPointWrapper(name, command_class)\n self.add_command_ep(ep, ns=ns)", "def fill_parser(self, parser):\n parser.add_argument(\n \"name\",\n nargs=\"?\",\n help=(\n \"The name of the charm (optional, will get the name from\"\n \"metadata.yaml if not given)\"\n ),\n )", "def add_subcommands(cls, parser: argparse.ArgumentParser) -> None:\n if cls.SUBCOMMANDS:\n subparsers = parser.add_subparsers(title=\"subcommands\", metavar=\"\", dest='cmd')\n for subcmd_class in cls.SUBCOMMANDS:\n parsers = subcmd_class.get_args()\n subcmd_class.parser = parsers[-1]\n\n subparser = subparsers.add_parser(\n subcmd_class.NAMES[0],\n aliases=subcmd_class.NAMES[1:],\n parents=parsers,\n help=subcmd_class.HELP,\n epilog=subcmd_class.EPILOG)\n subparser.set_defaults(command_class=subcmd_class)\n subcmd_class.customize_subparser(subparser)", "def command(self):\n raise NotImplementedError", "def add_argument(self, *args, **kwds):\n # no argument to add to stack\n if not args:\n return self\n\n # consume Command objects if exists\n if isinstance(args[0], Command):\n self._arg_stack.extend(args[0]._arg_stack)\n target = args[0]\n return self.add_argument(*args[1:], **kwds)\n\n # stack args, kwds to pass to parser.add_argument\n self._arg_stack.append(('normal', args, kwds))\n return self", "def add_parser(subp, raw):\n tmpp = subp.add_parser('mkmodel', help='create a model file',\n formatter_class=raw,\n description=textwrap.dedent(DESC))\n tmpp.add_argument('model', type=str, metavar='MODELNAME',\n help='name of the model')\n tmpp.add_argument('type', type=str, choices=['ssa', 'sde'],\n help='model type')\n tmpp.add_argument('--nspecs', type=positive_type,\n help='number of species')\n tmpp.add_argument('--nreacs', type=positive_type,\n help='number of reactions')\n tmpp.add_argument('--dim', type=positive_type,\n help='dimension of phase space')\n tmpp.add_argument('-z', type=str, choices=['row', 'col'],\n help='state change matrix format')", "def add_option(self, argparser, option):\n if not isinstance(option, CommandOption):\n raise TypeError('Command option object must be an instance of CommandOption')\n option.add_to_parser(argparser)", "def run(self, command):\n self.commands.append(command)\n return BookBuilder.run(self, command)", "def _add(self, command, *args):\n return self._traverse_command(command, *args, _to_index=args, _to_deindex=[], _bypass_proxy=True)", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of charm or bundle\")\n parser.add_argument(\n \"-r\",\n \"--revision\",\n type=SingleOptionEnsurer(int),\n required=True,\n help=\"The revision to release\",\n )\n parser.add_argument(\n \"-c\",\n \"--channel\",\n action=\"append\",\n required=True,\n help=\"The 
channel(s) to release to (this option can be indicated multiple times)\",\n )\n parser.add_argument(\n \"--resource\",\n action=\"append\",\n type=ResourceOption(),\n default=[],\n help=(\n \"The resource(s) to attach to the release, in the <name>:<revision> format \"\n \"(this option can be indicated multiple times)\"\n ),\n )", "def add_arguments(self, parser):\n parser.add_argument('asins', nargs='+', type=str)", "def addCommand(function, command, description, usage = None, minArgs = 0, maxArgs = 0, showUsage = True):\n None", "def get_argument_parser(self):\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n fetch_parser = subparsers.add_parser('fetch', help='fetches and displays a release from discogs')\n fetch_parser.add_argument('discogs_id', help='the ID of the release')\n rip_parser = subparsers.add_parser('rip', help='rips the current CD to WAV')\n rip_parser.add_argument('--destination', help='optional destination for the CD rip')\n search_parser = subparsers.add_parser(\n 'search',\n prog='search',\n help='performs a very simple search on discogs')\n search_parser.add_argument('term', help='the term to search for')\n encode_parser = subparsers.add_parser(\n 'encode', help='Encodes a CD or a set of WAV files to mp3.')\n encode_parser.add_argument(\n 'encoding_from', choices=['cd', 'wav'], help='The source to encode from.')\n encode_parser.add_argument(\n 'encoding_to', choices=['mp3', 'flac'], help='The destination to encode to.')\n encode_parser.add_argument(\n '--source', help='The destination of the source wav file. This can be a file or directory.')\n encode_parser.add_argument(\n '--destination', help='The destination of the resulting mp3 or flac. This can be a file or directory.')\n encode_parser.add_argument(\n '--keep-source', action='store_true', help='If encoding from wav, use this to keep the original wav being removed.')\n encode_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n encode_parser.add_argument(\n '--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the encoded files.')\n decode_parser = subparsers.add_parser('decode', help='Decodes a set of FLAC or MP3 files to WAV.')\n decode_parser.add_argument(\n 'decode_from', choices=['flac', 'mp3'], help='The source to decode from.')\n decode_parser.add_argument(\n '--source', help='The destination of the source file. This can be a file or directory.')\n decode_parser.add_argument(\n '--destination', help='The destination of the resulting wav. This can be a file or directory.')\n tag_parser = subparsers.add_parser('tag', help='Tags an audio file')\n tag_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The tagging action to be performed. A tag can be added or removed.')\n tag_parser.add_argument(\n 'format', choices=['mp3', 'flac'], help='The file format of the audio file being tagged.')\n tag_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n tag_parser.add_argument(\n '--source',\n help='The source audio files to tag. This can be a file or a directory. If the source is omitted, the files in the current working directory will be used.')\n tag_parser.add_argument('--discogs-id', help='The discogs ID for the release. 
When this is used metadata from the discogs release will be applied to the tagged files.')\n tag_parser.add_argument('--artist', help='The artist to use for the tag.')\n tag_parser.add_argument('--album-artist', help='The album artist to use for the tag.')\n tag_parser.add_argument('--album', help='The album to use for the tag.')\n tag_parser.add_argument('--title', help='The title to use for the tag.')\n tag_parser.add_argument('--year', help='The year to use for the tag.')\n tag_parser.add_argument('--genre', help='The year to use for the tag.')\n tag_parser.add_argument('--track-number', help='The track number to use for the tag.')\n tag_parser.add_argument('--track-total', help='The track total to use for the tag.')\n tag_parser.add_argument('--disc-number', help='The disc number to use for the tag.')\n tag_parser.add_argument('--disc-total', help='The disc total to use for the tag.')\n tag_parser.add_argument('--comment', help='The comment for the tag.')\n artwork_parser = subparsers.add_parser('artwork', help='adds or removes artwork from a file')\n artwork_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The artwork action to be performed. The artwork can be added or removed.')\n artwork_parser.add_argument(\n 'type', choices=['mp3', 'flac'], help='The type of file to apply the artwork to.')\n artwork_parser.add_argument(\n '--source', help='The destination file or directory to apply the artwork to. If there is no source then any artwork in the current directory will be used.')\n artwork_parser.add_argument(\n '--destination', help='The destination file or directory to apply the artwork to. If there is no destination then the current directory will be used.')\n mix_parser = subparsers.add_parser('mix', help='adds a mix')\n mix_parser.add_argument('source', help='the source of the mix')\n mix_parser.add_argument('--artist', help='The artist to use for the tag.')\n mix_parser.add_argument('--album', help='The album to use for the mix.')\n mix_parser.add_argument('--title', help='The title to use for the mix.')\n mix_parser.add_argument('--year', help='The year to use for the mix.')\n mix_parser.add_argument('--comment', help='The comment for the mix.')\n return parser", "def add_arguments(self, sub_parser):\n sp = sub_parser\n sp.add_argument('--fork', nargs='?')", "def setup_parser(self):\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument('words', metavar='W', nargs='+', help=POSITIONAL_HELP)\n parser.add_argument('-a','--any', dest=\"search_funct\", action=\"store_const\", \n const='any', default='all', help=SEARCH_HELP)\n parser.add_argument('-o','--only-id', action='store_true', help=ID_HELP)\n parser.add_argument('-u', '--update', action='store_true', help=UPDATE_HELP)\n return parser", "def cmd(self):", "def _init_add(self):\n def add(core, args):\n return core.add(args.start, args.stop, args.task)\n\n usage = 'stl add start stop [task]'\n desc = (\n 'directly add a log entry; '\n 'you can also do this from python, take a look at '\n 'stl.core.Core.add()'\n )\n\n subp = self.subparsers.add_parser(\n 'add', usage=usage, description=desc,\n help=desc[:desc.find(';')])\n\n subp.add_argument(\n 'start',\n help='when work on the task started; use %%Y-%%m-%%dT%%H:%%M')\n subp.add_argument(\n 'stop',\n help='when work on the task stopped; use %%Y-%%m-%%dT%%H:%%M')\n subp.add_argument(\n 'task', nargs='?', default='',\n help='the task being worked on; optional')\n\n subp.set_defaults(func=add)", "def register(\n self,\n root_parser: 
argparse.ArgumentParser,\n subparsers: argparse._SubParsersAction,\n ) -> None:\n self.root_parser = root_parser\n parser = subparsers.add_parser(\n self.name,\n aliases=self.aliases,\n help=self.help,\n description=self.help,\n add_help=self.add_help,\n )\n parser.set_defaults(command=self)\n self.configure(parser)", "def run(self, version, parser, run=None, **kwargs):\n pass", "def _add_arguments(self):\r\n self._parser.add_argument(\r\n '-s', '--server',\r\n required=True,\r\n help=\"enter server name\")\r\n self._parser.add_argument(\r\n '-db', '--database',\r\n required=True,\r\n help='enter database name')\r\n self._parser.add_argument(\r\n '-u', '--username',\r\n help='enter username')\r\n self._parser.add_argument(\r\n '-p', '--password',\r\n help='enter password')\r\n #self._parser.add_argument(\r\n # '-h', '--help',\r\n # help='show this help message and exit')\r", "def _setup_command(self):\r\n raise NotImplementedError", "def extend_parser(self, parser):\n return parser" ]
[ "0.728577", "0.7169099", "0.7166533", "0.7153756", "0.7049907", "0.7003788", "0.7001368", "0.69773185", "0.6960154", "0.6897418", "0.68806356", "0.6854973", "0.6824182", "0.6771863", "0.6764204", "0.67025226", "0.6698706", "0.6660946", "0.6652996", "0.6588826", "0.6572274", "0.65630543", "0.6538937", "0.651705", "0.6502487", "0.6502487", "0.6496003", "0.6473968", "0.64599735", "0.645764", "0.64522356", "0.64474267", "0.6435873", "0.64343643", "0.64123887", "0.6412287", "0.6412287", "0.64109886", "0.6393871", "0.63917774", "0.63892996", "0.63867325", "0.63660926", "0.6355915", "0.6350854", "0.6344078", "0.6339701", "0.633529", "0.633213", "0.6328331", "0.6305953", "0.62931263", "0.6291281", "0.6286243", "0.62829936", "0.62729394", "0.62699884", "0.6240672", "0.6240672", "0.62139744", "0.6212917", "0.61962163", "0.6189658", "0.61879617", "0.6175112", "0.6169207", "0.6160461", "0.6154669", "0.614046", "0.6138636", "0.6135717", "0.6135717", "0.61349356", "0.6133392", "0.61261404", "0.6120665", "0.6119703", "0.61172295", "0.61096424", "0.61074704", "0.6099701", "0.6095834", "0.60881114", "0.6086152", "0.6081407", "0.6080336", "0.6076366", "0.60661006", "0.60639054", "0.60624135", "0.6057129", "0.6046824", "0.6041791", "0.6034107", "0.6023697", "0.60215086", "0.60045063", "0.59955007", "0.5989982", "0.59870875", "0.59819686" ]
0.0
-1
Saves summary statistics as a csv file in the current directory and returns the output filename.
def save_summary_statistics_csv( experiment_name, roi_summary_data, save_directory_path: str = "" ): # Create directories on the path if they don't already exist Path(save_directory_path).mkdir(parents=True, exist_ok=True) csv_filename = f"{experiment_name} - summary statistics (generated {iso_datetime_for_filename(datetime.now())}).csv" csv_filepath = Path(save_directory_path) / csv_filename roi_summary_data.to_csv(csv_filepath, index=False) print(f"Summary statistics saved to: {csv_filepath}\n") return csv_filepath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkpoint_stats(self, stats):\n stats.to_csv(\n self.params.stat.dir + self.params.model.name + \"_\" + self.params.data.name + \".stat\",\n sep='\\t',index=False,header=True)", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)", "def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def save_submission(results, file_name='submission.csv'):\n submission_path = path.join('..', 'output', file_name)\n results.to_csv(submission_path)", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def model_summary_to_file(model, save_path):\n with open(save_path, 'w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + \"\\n\"))", "def generate_csv(summaries, filename):\n with open(filename, 'wb') as f:\n header = ','.join(['ACTIVATION', 'HIDDEN SIZE', 'TRAIN LOSS', 'VAL LOSS', 'TRAIN PPX', 'VAL PPX']) + '\\n'\n f.write(header)\n\n def extract_best(summary, metric):\n return min([h.metrics[metric] for h in summary['history']])\n for summary in summaries:\n activation = summary['meta']['ACTIVATION']\n h_size = summary['meta']['NUM_HIDDEN']\n train_loss, val_loss, train_ppx, val_ppx = extract_best(summary, 'train_loss'), extract_best(summary, 'val_loss'), extract_best(summary, 'train_ppx'), extract_best(summary, 'val_ppx')\n line = \",\".join([activation] + map(lambda x: \"%.2f\" % (x), [h_size, train_loss, val_loss, train_ppx, val_ppx])) + '\\n'\n f.write(line)", "def save_csv(csv_fn, output_dir, df_to_save):\n\n # import packages\n import os\n import pandas as pd\n\n\n if os.path.isfile(output_dir + '/' + csv_fn):\n print('Data already saved and will not be saved again')\n else:\n df_to_save.to_csv(output_dir + '/' + csv_fn, index = False)\n\n return None", "def save_results_to_csv(save_file_path, append=True, tmp_file_path=tmp_file_path, datefmt='%d/%m/%Y %H:%M:%S'):\n # load tmp results\n res_summary = open_json(tmp_file_path, data_format=pd.DataFrame)\n\n # calculate average scores\n combis = list(product(\n ['CV', 'Val'], \n ['precision', 'recall', 'f1', 'exact match', 'loss', \n 'precision_CE', 'recall_CE', 'f1_CE', 'exact match_CE']\n ))\n for combi in combis:\n get_average(res_summary, combi)\n\n # 
calculate end time\n end = datetime.now()\n res_summary['endtime'] = end.strftime(datefmt)\n res_summary['timetaken'] = end - \\\n datetime.strptime(res_summary['starttime'][0], datefmt)\n\n if append and os.path.isfile(save_file_path):\n # load old file\n old_summary = pd.read_csv(save_file_path)\n # append below\n res_summary = pd.concat([old_summary, res_summary], axis=0)\n\n # save final and delete tmp file\n res_summary.to_csv(save_file_path, index=False)\n os.remove(tmp_file_path)", "def store_performance(results, out_dir='', name='results_summary'):\n\n results_file = os.path.join(out_dir, name + '.csv')\n\n results_summary = {\n 'pop_mean_accuracies': ['%.2f' % (100 * np.mean(results[:, 1]))],\n 'pop_max_accuracies': ['%.2f' % (100 * np.max(results[:, 1]))],\n 'pop_mean_rewards': [np.mean(results[:, 0])],\n 'pop_max_rewards': [np.max(results[:, 0])],\n }\n\n df = pd.DataFrame.from_dict(results_summary)\n\n if os.path.isfile(results_file):\n old_df = pd.read_csv(results_file, sep=';')\n df = pd.concat([old_df, df], sort=True)\n df.to_csv(results_file, sep=';', index=False)", "def save_results_csv(fname, results, header=0):\n\n new_rows = []\n if not os.path.isfile(fname):\n args = fname.split('/')[:-1]\n directory = os.path.join(*args)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(fname, 'wt') as f:\n writer = csv.writer(f)\n if header == 0:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed', 'Date']])\n if header == 1:\n writer.writerows(\n [['Precision', 'Recall', 'F1 score', 'Random Seed']])\n elif header ==2:\n writer.writerows(\n [['Step', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed']])\n\n elif header == 5:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'Scores']])\n\n with open(fname, 'at') as f:\n # Overwrite the old file with the modified rows\n writer = csv.writer(f)\n new_rows.append(results) # add the modified rows\n writer.writerows(new_rows)", "def write_csv(filename, summaries, float_format='%.02f'):\n data = [['solution', 'total time', 'ok', 'errors']]\n\n for var, s in summaries[0].stats.iteritems():\n for stat in s:\n data[0].append('%s %s' % (var, stat))\n\n for summary in summaries:\n row = [summary.solution, float_format % summary.total_time, summary.ok,\n summary.errors]\n for s in summary.stats.itervalues():\n for stat in s.itervalues():\n row.append(float_format % stat)\n data.append(row)\n\n with open(filename, 'wb') as csv_file:\n writer = csv.writer(csv_file)\n for row in data:\n writer.writerow(row)", "def save_results(self, path):\n create_folder(path)\n self.get_scores().to_csv(path + r'/scores.csv', index=False)\n self.get_results().to_csv(path + r'/results.csv', index=False)\n self.get_pivot_last_epoch().to_csv(path + r'/pivot_last_epoch.csv', index=True)", "def save_results_csv(fname, results, header=0):\n\n new_rows = []\n if not os.path.isfile(fname):\n args = fname.split('/')[:-1]\n directory = os.path.join(*args)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(fname, 'wt') as f:\n writer = csv.writer(f)\n if header == 0:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'AUPRC', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed', 'Date']])\n if header == 1:\n writer.writerows(\n [['Precision', 'Recall', 'F1 score', 'Random Seed']])\n elif header ==2:\n writer.writerows(\n [['Step', 'AUPRC', 'AUROC', 
'Precision', 'Recall',\n 'F1 score', 'Random Seed']])\n\n elif header == 5:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'Scores']])\n\n with open(fname, 'at') as f:\n # Overwrite the old file with the modified rows\n writer = csv.writer(f)\n new_rows.append(results) # add the modified rows\n writer.writerows(new_rows)", "def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)", "def matrix_export_save(simulation, demandsegment, dir):\n matrix = demandsegment.matrix\n matrix_couples = Matrix.objects.filter(matrices=matrix)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n filename = dir + '/matrix(' + demandsegment.usertype.name + ')(' + str(demandsegment.usertype.user_id) + ').tsv'\n\n with codecs.open(filename, 'w', encoding='utf8') as f:\n writer = csv.writer(f, delimiter='\\t')\n # Get a dictionary with all the values to export.\n values = matrix_couples.values_list('p__user_id', 'q__user_id', 'r')\n # Write a custom header.\n writer.writerow(['origin', 'destination', 'population'])\n writer.writerows(values)\n\n return filename", "def output_summary_stats(self, filename):\r\n\r\n total_return = self.equity_curve['equity_curve'][-1]\r\n returns = self.equity_curve['returns']\r\n pnl = self.equity_curve['equity_curve']\r\n\r\n sharpe_ratio = create_sharpe_ratio(returns, periods=252)\r\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\r\n self.equity_curve['drawdown'] = drawdown\r\n\r\n stats = [(\"Total Return\", \"%0.2f%%\" % \\\r\n ((total_return - 1.0) * 100.0)),\r\n (\"Sharpe Ratio\", \"%0.2f%%\" % sharpe_ratio),\r\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\r\n (\"Drawdown Duration\", \"%f\" % dd_duration)]\r\n self.equity_curve.to_csv(filename)\r\n return stats", "def write_analysis_details(self, csvfile):\n #filepath, total words, line count, most common word\n f = open(csvfile, 'w')\n most_common = self.most_common()\n f.write('filepath,total words,line count,most common word\\n')\n f.write(f'{self.filepath},{self.word_count()},{self.sentence_count()},{self.most_common()[0]}')\n f.close()", "def write_csv(self):\n with open(paths.CSV_FILE, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n assg = AssignmentConfig().get_assignment()\n writer.writerow([\"Student\"] + assg.get_test_list() + assg.get_programs_list() +\n [\"normalised_test_score\"] + [\"normalised_prog_score\"] + [\"total\"] + [\"total_rounded\"])\n\n for (submitter, submitter_data) in sorted(self.snapshot['results'].items()):\n total_score = submitter_data[\"normalised_test_score\"] + submitter_data[\"normalised_prog_score\"]\n total_rounded = round(total_score * 2) / 2 # total score rounded to nearest 0.5\n writer.writerow([submitter] +\n [submitter_data[\"tests\"][test] for test in sorted(submitter_data[\"tests\"])] +\n [submitter_data[\"progs\"][prog] for prog in sorted(submitter_data[\"progs\"])] +\n [submitter_data[\"normalised_test_score\"]] +\n [submitter_data[\"normalised_prog_score\"]] +\n [round(total_score, 2)] +\n [total_rounded])", "def output():\n\n print(\"\\n*****************************************************************\")\n print(\"\\nAll transfer data is saved in 'All_transfer_frequencies.csv'\")\n print(\"\\nThe most likely transfers are saved in 'likely_transfers.csv'\")\n\n os.mkdir(\"Transfer_results\")\n os.system(\"mv *.csv Transfer_results\")\n\n print(\"\\nBoth results are saved in the 
'Transfer_results' directory\")\n print(\"\\nScript finished running\")\n print(\"\\n*****************************************************************\")", "def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return", "def save_csv(self, save_path=''):\n if not save_path:\n time = datetime.now()\n time = datetime.strftime(time, '%Y-%m-%d_%H:%M:%S')\n filename = time + '.csv'\n save_path = os.path.join(os.path.abspath(os.curdir), filename)\n data = self._get_data()\n with open(save_path, 'wb') as f:\n for line in data:\n f.write(line + '\\n')", "def get_summary_filename(self):\n fn = os.path.join(SUMMARY_PREFIX,SUMMARY_CURRENT)\n if (os.path.isfile(fn)):\n try:\n fd = open(fn,\"r\")\n fname = fd.read()\n except :\n cmd = \"rm -f %s\"%fn\n result,status = self.cli(cmd)\n return \"\"\n return fname\n return \"\"", "def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')", "def export_csv(state, out_file=None):\n\n if out_file is None:\n csvfile = sys.stdout\n else:\n csvfile = open(out_file, 'w')\n\n try:\n writer = csv.writer(csvfile)\n for grade in state.grades:\n writer.writerow([grade.student_name(), grade.score(),\n grade.breakdown(state.user_name)])\n finally:\n if out_file is not None:\n csvfile.close()", "def calculated_data_statistics_csv(my_df, feature_class_name):\n #remove Well ID and UTMs from dataframe\n updated_df = my_df.drop([0, 1, 8], axis = 1)\n raw_csv_name_stats = f\"{feature_class_name}_statistics.csv\"\n header_list = [\"T_min\",\n \"T_raw\",\n \"T_max\",\n \"K_min\",\n \"K_raw\",\n \"K_max\"]\n \n index_list = {0:'Count',\n 1:'Mean',\n 2:'Standard Deviation',\n 3:'Minimum',\n 4:'25th Percentile',\n 5:'Median',\n 6:'75th Percentile',\n 7:'Maximum',\n 8:'Logrithmic Mean',\n 9:'Logrithmic Standard Deviation'}\n log_mean = np.log10(updated_df.mean())\n log_std = np.log10(updated_df.std())\n useful_values = updated_df.describe()\n useful_values = useful_values.append(log_mean, ignore_index = True)\n useful_values = useful_values.append(log_std, ignore_index = True)\n useful_values = useful_values.rename(index = index_list) #gives the index unique names\n useful_values.to_csv(raw_csv_name_stats, header = header_list)", "def save_performances(self):\r\n nb_datasets = len(self.results)\r\n resu = [[] for k in range(nb_datasets)]\r\n\r\n # fetch results\r\n for k in range(nb_datasets):\r\n best = np.argmax(self.results[k]['mean_test_score'])\r\n resu[k].append(('score', self.results[k]['mean_test_score'][best]))\r\n resu[k] = resu[k] + list(self.results[k]['params'][best].items())\r\n\r\n # write results in csv\r\n for k, resu in enumerate(resu):\r\n with open('results/final_results_{}.csv'.format(k), 'a') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(resu)", "def write_stats(self):\n with open(self.log_file,'a') as output:\n writer = csv.writer(output)\n n_comps,comp_size = self.connected_component() # Calculate number of connected components 
(sub-colonies)\n writer.writerow([self.pop_size,\n self.get_average_age(),\n self.get_average_survival(),\n # Nearest neighbor logging disabled for speed\n # Use c++ tool to calculate nearest neighbors after runs\n # or uncomment line below to calculate in python (slower)\n # self.get_average_repro()] + [self.get_average_neighbors(r) for r in range(0,16)] +\n self.get_average_repro()] +\n [n_comps,\",\".join(map(str,comp_size))])", "def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)", "def save_csv(df, fp):\n if not os.path.isdir(os.path.split(fp)[0]):\n os.makedirs(os.path.split(fp)[0])\n\n df.to_csv(fp, index=False)", "def save(self):\n\t\t# save self.dfAnalysis\n\t\tcsvPath = self._getSavePath()\n\t\tprint('saving:', csvPath)\n\t\tself.dfAnalysis.to_csv(csvPath)", "def save_csv(csv_path: str, duplicates: pd.DataFrame) -> None:\n csv_file = os.path.join(csv_path, 'duplicates.csv')\n duplicates.to_csv(csv_file, index=False)", "def write_results_to_file(stocks_to_write):\n date = datetime.date.today()\n date_str = str(date.year) + '-' + str(date.month) + '-' + str(date.day)\n file_name_core = 'results-' + date_str\n\n with open(\"results/txt/\" + file_name_core + \".txt\",\n 'w') as txt_results_file:\n\n for stock in stocks_to_write:\n txt_results_file.write(stock.make_one_line_report() + \"\\n\")\n\n with open(\"results/csv/\" + file_name_core + \".csv\",\n 'w') as csv_results_file:\n\n writer = csv.writer(csv_results_file)\n writer.writerow(StockData.get_csv_data_headings())\n for stock in stocks_to_write:\n writer.writerow(stock.get_csv_data_list())", "def save_tile_data(tile_summary):\n\n time = Time()\n\n csv = summary_title(tile_summary) + \"\\n\" + summary_stats(tile_summary)\n\n csv += \"\\n\\n\\nTile Num,Row,Column,Tissue %,Tissue Quantity,Col Start,Row Start,Col End,Row End,Col Size,Row Size,\" + \\\n \"Color Factor,S and V Factor,Quantity Factor,Score\\n\"\n\n for t in tile_summary.tiles:\n line = \"%d,%d,%d,%4.2f,%s,%d,%d,%d,%d,%d,%d,%4.0f,%4.2f,%4.2f,%0.4f\\n\" % (\n t.tile_num, t.r, t.c, t.tissue_percentage, t.tissue_quantity().name, t.c_s, t.r_s, t.c_e, t.r_e, t.c_e - t.c_s,\n t.r_e - t.r_s,t.color_factor,\n t.s_and_v_factor, t.quantity_factor, t.score)\n csv += line\n\n data_path = slide.get_tile_data_path(tile_summary.slide_name)\n csv_file = open(data_path, \"w\")\n csv_file.write(csv)\n csv_file.close()\n\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile Data\", str(time.elapsed()), data_path))", "def save(self, data, outpath):\n data.to_csv(outpath)", "def exportEvaluation(self,results,url):\n profbox()\n if not os.path.exists(url):\n open(url, 'w').close()\n myfile = open(url, 'a')\n\n wr = csv.writer(myfile)\n r = numpy.array(results)\n if len(r.shape) == 1:\n wr.writerow(results)\n else:\n wr.writerows(results)", "def write_output(self) -> None:\n self.home.round(2).to_csv(var.indicators_base_cumsum + \"home_\" + str(self.year) + \".csv\")\n self.away.round(2).to_csv(var.indicators_base_cumsum + \"away_\" + str(self.year) + \".csv\")", "def write_file(self):\n rl_df, lift_df = self.create_df()\n\n number = re.findall('\\d+', self.url)[0]\n\n if self.write is True:\n with open('house_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file:\n rl_df.to_csv(file, 
sep=';')\n with open('house_lifts_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file2:\n lift_df.to_csv(file2, sep=';')", "def write(self):\n \n self.df.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/mls.csv')", "def write_benchmark_csv(file_out, benchmark_result, benchmark, test_config = TestConfig()):\n writer = csv.writer(file_out)\n writer.writerow(('Benchmark', benchmark_result.name))\n writer.writerow(('Benchmark Group', benchmark_result.group))\n writer.writerow(('Failures', benchmark_result.failures))\n\n # Write result arrays\n if benchmark_result.results:\n writer.writerow(('Results',''))\n writer.writerows(metrics_to_tuples(benchmark_result.results))\n if benchmark_result.aggregates:\n writer.writerow(('Aggregates',''))\n writer.writerows(benchmark_result.aggregates)", "def write_output(series, filename):\n\n logging.info('Writing output')\n\n df = series.reset_index()\n\n df.columns = ['subject_id', 'classification']\n\n df.to_csv(filename, index=False)", "def create_output(root_folder_to_save_csv):\n df = pd.DataFrame(columns=['IMAGE Name', 'Original Class Name', 'Predictions On Original Images',\n 'Predictions On Perturbed Images'])\n df.to_csv(os.path.join(root_folder_to_save_csv, 'output.csv'), index=False)", "def writeCSV():\n final_list = get_final_list()\n path_to_csv_File = 'system_metrics.csv'\n\n csv_file = open(path_to_csv_File, 'w+', newline='', encoding=\"utf8\")\n csv_file_writer = csv.writer(csv_file, delimiter=',')\n\n csv_file_writer.writerow(['Subscription', 'Resource', 'MetricType',\n 'Timestamp', 'Unit', 'Minimum', 'Maximum', 'Average'])\n\n for item in final_list:\n csv_file_writer.writerow([item['subscription'], item['resource'], item['metricType'], item['timestamp'],\n item['unit'], item['minimum'], item['maximum'], item['average']])\n\n print('Output written successfully!!')", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def write_csv(self, directory = None):\n if ((directory is None) and\n (self._session.config.folder_basename is not None)):\n directory = self._session.results._full_path\n else:\n return\n \n file = CSV_file(self, directory)\n file.write()\n return file", "def write_stats(self, filestream):\n if not self.summary:\n self.summarize()\n\n print(self.scores, file=filestream)", "def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. 
Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])", "def create_report(cls):\n try: \n report = f\"{sysname}_statistics.csv\"\n file_exists = os.path.isfile(report)\n fieldnames = ['timestampt','total_ram','free_ram','used_ram','cpu_total','cpu_loadavg','acs_8080','acs_8181','acs_8443','mysql','oracle','iis_ram','iis_cpu','java_ram','java_cpu','mysqld_ram','mysqld_cpu']\n data = SystemInformation.evaluate_data()\n with open(report, 'a', newline='') as csvreport:\n write = csv.DictWriter(csvreport, delimiter=',', lineterminator='\\n', fieldnames=fieldnames)\n if not file_exists:\n write.writeheader()\n write.writerow(data)\n logging.info(f\"Done. Report saved to file {report}\")\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def save(df, out_file):\n print('------------< save >------------')\n out_path = './data'\n makedirs(out_path, exist_ok=True)\n print(f'path: {out_path}/{out_file}')\n print(f'shape: {df.shape}')\n df.to_csv(f'{out_path}/{out_file}', index=False)\n print('--------------------------------')", "def save_report(dataframe, filename, index=True):\n out_file = reports_path / filename\n dataframe.to_csv(out_file, index=index)\n logging.info(f\"Written report to {out_file.resolve()}\")", "def write_tocsv(file_name, dataframe) :\n print(\"\\nSaved result to {}\\n\".format(file_name))\n dataframe.to_csv(file_name, mode='a', header=False,index=False)", "def save_results(self):\n results = pd.concat([\n pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)", "def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)", "def save(df, path):\n \n # Extract the directory and filename from the given path\n directory = os.path.split(path)[0]\n filename = os.path.split(path)[1]\n if directory == '':\n directory = '.'\n \n # If the directory does not exist, create it\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n # The final path to save to\n savepath = os.path.join(directory, filename)\n \n # Save the dataset\n sampled_frame.to_csv(savepath, index=False)", "def save_results(self, results, file_name, file_type):\n if file_type == 'csv':\n csv_filename = '{}.csv'.format(file_name)\n\n with open(csv_filename, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerows(results)", "def createFileCSV(table, path=\"./prediction\"):\t\n\tif len(table) < 1:\n\t\traise NameError('Empty Table!')\n\telse:\n\t\tfile = open(path + '.csv', 'w+')\n\n\t\tfile.write(table[0].toStringHeaders() + \"\\n\")\n\n\t\tfor row in table:\n\t\t\tfile.write(row.toStringCSV() + '\\n')\n\t\tfile.close()", "def public_transit_export_save(simulation,dir):\n matrix_couples = get_query('public_transit', simulation)\n # To 
avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n filename = dir + '/public_transit.tsv'\n\n with codecs.open(filename, 'w', encoding='utf8') as f:\n writer = csv.writer(f, delimiter='\\t')\n # Get a dictionary with all the values to export.\n values = matrix_couples.values_list('p__user_id', 'q__user_id', 'r')\n # Write a custom header.\n writer.writerow(['origin', 'destination', 'travel time'])\n writer.writerows(values)\n\n return filename", "def write_output_summary(outfile, read_scores, args):\n\theader = ['sim_info_file', 'sim_sam_file', 'analysis_info_file', 'results_file', 'junc_type', 'score_type', \n\t\t\t 'true_positives', 'true_negatives', 'false_positives', 'false_negatives']\n\t\t\t \n\tfilenames = [args.sim_info, args.sim_sam, args.analysis_info, args.output]\n\ttypes = ['tp', 'tn', 'fp', 'fn']\n\t\t\t \n\twith open(args.output_summary, \"w\") as outfile:\n\t\toutfile.write(\"\\t\".join(header) + \"\\n\")\n\t\t\n\t\tfor score_type in read_scores:\n\t\t\tfor junc_type in read_scores[score_type]:\n\t\t\t\tif junc_type == 'discord':\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]/2) for type in types]\n\t\t\t\telse:\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]) for type in types]\n\t\t\t\tline = filenames + [junc_type, score_type] + scores\n\t\t\t\toutfile.write(\"\\t\".join(line) + \"\\n\")", "def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n vector.to_csv(path_or_buf=\"vectorfile.csv\")", "def writeCSV(csvPath, usedmpicommands, first_table_values,second_table_values,third_table_values, df):\n\n print(\"Saving CSV files in directory '\" + os.path.realpath(csvPath) +\"'\")\n\n #routine Summary by rank metrics table\n metric_csv_table = df.to_csv(sep=';')\n with open(os.path.join(csvPath,'routineSummaryByRank_metric_table.csv'), 'w') as outfileMetricTable:\n outfileMetricTable.write(metric_csv_table)\n outfileMetricTable.close()\n\n #routine Summary by rank data table (just the data from the instrumenation file in csv format)\n with open(os.path.join(csvPath,'routineSummaryByRank_summary.csv'), 'w') as outfileMPICommands:\n wr = csv.writer(outfileMPICommands, delimiter=';')\n wr.writerows(usedmpicommands)\n outfileMPICommands.close()\n\n #application Summary by rank data (first table)\n #Columns: \"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_1st_table.csv'), 'w') as outfile_first_table:\n wr = csv.writer(outfile_first_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc CPU Time\",\"User Portion\", 
\"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"])\n wr.writerows(first_table_values)\n outfile_first_table.close()\n \n #application Summary by rank data (second table) \n #Columns: \"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_2st_table.csv'), 'w') as outfile_second_table:\n wr = csv.writer(outfile_second_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"])\n wr.writerows(second_table_values)\n outfile_second_table.close()\n\n #application Summary by rank data (third table)\n #Columns: \"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_3rd_table.csv'), 'w') as outfile_third_table:\n wr = csv.writer(outfile_third_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"])\n wr.writerows(third_table_values)\n outfile_third_table.close()\n\n #In case, you are wondering, where the last part of the instrumentation file is (message Summary by rank),\n #it is currently not saved as a csv file. This is because:\n #\n #1st: In the platform_mpi instrumentation file, the data is somehow visualized beautifully\n #2nd: It is very hard to save the data in a 2-dimensional csv file format\n #Therefore we decided, not to export this data in a csv file format", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def write_background_stats(self, out_dir, all_output_lines):\r\n out_file = os.path.join(out_dir, \"simulations_stats.csv\")\r\n with open(out_file, \"w\") as file:\r\n col_names = \"sim_id,round,best_score,avg_score,solutions,elitist_candidates,elitism_rate,discard_rate,\" \\\r\n \"mutation_rate,colors\\n\"\r\n file.write(col_names)\r\n for lines_of_one_simulation in all_output_lines:\r\n for line in lines_of_one_simulation:\r\n file.write(line)", "def log_results(self, path):\n pd.DataFrame(self.results).to_csv(path)", "def save_csv(outfile, movies):\n writer = csv.writer(outfile)\n writer.writerow(['Title', 'Rating', 'Year', 'Actors', 'Runtime'])\n for movie in movies:\n writer.writerow(movie)\n\n # ADD SOME CODE OF YOURSELF HERE TO WRITE THE MOVIES TO DISK", "def create_summary_statistics(forward_accuracy, backward_accuracy, merged_accuracy):\n summary_statistics = open(f'summary_statistics.txt', 'a')\n summary_statistics.write(f'The forward model has an accuracy of: {forward_accuracy}\\n')\n summary_statistics.write(f'The backward model has an accuracy of: {backward_accuracy}\\n')\n summary_statistics.write(f'The merged model has an accuracy of: {merged_accuracy}\\n')\n summary_statistics.close()", "def generate_csv(results, path):\n csvpath = path + '/results.csv'\n\n if os.path.exists(csvpath):\n os.remove(csvpath)\n\n csvf = open(csvpath, 'w')\n\n # Header\n csvf.write(\"Powerpoint, Mean, Mode\\n\")\n\n # Results\n for power in results:\n mean = 0\n count = 0\n mode = 0\n maxc = 0\n for freq in results[power]:\n 
freqc = results[power][freq]\n\n mean += freq * freqc\n count += freqc\n\n if freqc > maxc:\n mode = freq\n\n mean /= count\n\n csvf.write(f\"{power}, {mean}, {mode}\\n\")\n\n csvf.close()", "def save_file(self):\n\n file_name, _ = QFileDialog.getSaveFileName(self, \"Save Experiment Output\", \"\", \"Text Files (*.txt);;CSV Files (*.csv)\")\n plot_name = file_name.split(\".\")[0] + \"_plot.pdf\"\n\n try:\n data_file = open(file_name, \"w\")\n data_file.write(self._fitter.fit_as_csv)\n data_file.close()\n\n plot_save = PdfPages(plot_name)\n fig, ax = self._fitter.plot()\n plot_save.savefig(fig)\n plot_save.close()\n except:\n pass", "def export(self, directory, fname=None):\r\n if not fname:\r\n try:\r\n fname = self.dt_0.strftime('%Y.%m.%d_%H.%M.%S') + '.txt'\r\n except AttributeError:\r\n fname = self.fname\r\n\r\n self.data.to_csv(Path(directory, fname), header=None, float_format='%.7g')\r\n logger.info(f'{self} export to folder {directory}.')", "def save_values(self):\n f_name = self.img_path.split('.')[0] + '_{}_'.\\\n format(self.data_type_name) + '.csv'\n dir_name = os.path.join(self.base_dir, f_name)\n if not os.path.exists(dir_name):\n for data_list in self.converted_values():\n with open(f_name, 'a') as f:\n wr = csv.writer(f, delimiter=';')\n wr.writerow(data_list)\n else:\n os.remove(f_name)\n for data_list in self.converted_values():\n with open(f_name, 'a') as f:\n wr = csv.writer(f, delimiter=';')\n wr.writerow(data_list)", "def save_stats(self, filename, save_full=False, overwrite=True):\n output_ds = self.covs_ds\n if save_full and self.nam_covar_var not in self.covs_ds.data_vars:\n output_ds = self.define_full_ds()\n self.covs_ds.close()\n\n if self.COMPRESS_OUTPUT:\n compress_vars = self.list_of_vars\n if save_full and self.num_cross_covs != 0:\n compress_vars = self.list_of_full_vars\n encoding = {}\n for var in compress_vars:\n if not var in self.exclude_compress:\n encoding.update({var: {\"zlib\": True, \"complevel\": 1}})\n else:\n encoding = None\n\n print(\"Writing stats data to file {}\".format(filename), flush=True)\n if overwrite:\n temp_filename = tempfile.mktemp()\n output_ds.to_netcdf(temp_filename, unlimited_dims=[\"time_counter\"],\n encoding=encoding)\n shutil.move(temp_filename, filename)\n else:\n output_ds.to_netcdf(filename, unlimited_dims=[\"time_counter\"],\n encoding=encoding)", "def file_observer(population, num_generations, num_evaluations, args):\r\n try:\r\n statistics_file = args['statistics_file']\r\n except KeyError:\r\n statistics_file = open('inspyred-statistics-file-{0}.csv'.format(time.strftime('%m%d%Y-%H%M%S')), 'w')\r\n args['statistics_file'] = statistics_file\r\n try:\r\n individuals_file = args['individuals_file']\r\n except KeyError:\r\n individuals_file = open('inspyred-individuals-file-{0}.csv'.format(time.strftime('%m%d%Y-%H%M%S')), 'w')\r\n args['individuals_file'] = individuals_file\r\n\r\n stats = inspyred.ec.analysis.fitness_statistics(population)\r\n worst_fit = stats['worst']\r\n best_fit = stats['best']\r\n avg_fit = stats['mean']\r\n med_fit = stats['median']\r\n std_fit = stats['std']\r\n \r\n statistics_file.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(num_generations, len(population), worst_fit, best_fit, med_fit, avg_fit, std_fit))\r\n for i, p in enumerate(population):\r\n individuals_file.write('{0}, {1}, {2}, {3}\\n'.format(num_generations, i, p.fitness, str(p.candidate)))\r\n statistics_file.flush()\r\n individuals_file.flush()", "def write_results(self, results, fname, folder=None):\n folder = folder or 
self.output_dir\n\n if not os.path.exists(folder):\n logger.info(\"creating {} (did not exist)\".format(folder))\n os.makedirs(folder)\n\n out_file = os.path.join(folder, fname)\n logger.info(\"writing to {}\".format(out_file))\n df = pd.DataFrame(results)\n df.to_csv(out_file, index=False)\n \n return None", "def output_csv(df):\n # remove existing plot\n if os.path.exists(\"files/converted.csv\"):\n os.remove(\"files/converted.csv\")\n # save csv\n df.to_csv('files/converted.csv')", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def write_to_csv(self, log_dir, run_dir, hmc=False):\n _, run_str = os.path.split(run_dir)\n avg_data = {\n 'log_dir': log_dir,\n 'run_dir': run_str,\n 'hmc': hmc,\n }\n\n for key, val in dict(sorted(self.data.items())).items():\n tensor = tf.convert_to_tensor(val)\n arr, steps = therm_arr(tensor.numpy(), therm_frac=0.2)\n if 'steps' not in avg_data:\n avg_data['steps'] = len(steps)\n avg_data[key] = np.mean(arr)\n\n # avg_data[key] = tf.reduce_mean(arr)\n\n avg_df = pd.DataFrame(avg_data, index=[0])\n outdir = os.path.join(BASE_DIR, 'logs', 'GaugeModel_logs')\n csv_file = os.path.join(outdir, 'inference.csv')\n head, tail = os.path.split(csv_file)\n io.check_else_make_dir(head)\n io.log(f'Appending inference results to {csv_file}.')\n if not os.path.isfile(csv_file):\n avg_df.to_csv(csv_file, header=True, index=False, mode='w')\n else:\n avg_df.to_csv(csv_file, header=False, index=False, mode='a')", "def pricing_export_save(simulation, dir):\n # Get all tolls.\n policies = get_query('policy', simulation)\n tolls = policies.filter(type='PRICING')\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n filename = dir + '/pricings.tsv'\n\n with codecs.open(filename, 'w', encoding='utf8') as f:\n writer = csv.writer(f, delimiter='\\t')\n # Get a dictionary with all the values to export.\n values = list()\n for toll in tolls:\n if toll.usertype:\n usertype_id = toll.usertype.user_id\n else:\n usertype_id = ''\n values.append([toll.location.user_id, toll.get_value_vector(),\n toll.get_time_vector(), usertype_id])\n # Write a custom header.\n writer.writerow(['link', 'values', 'times', 'traveler_type'])\n writer.writerows(values)\n\n return filename", "def save_csvFile(df,file_location,file_name,sep,encoding):\n try:\n date=datetime.datetime.now().replace(microsecond=0)\n fullpath=file_location + file_name\n df.to_csv(fullpath, sep=sep, encoding=encoding, index=False, header=True)\n except IOError:\n print('Error saving the file: ' , file_name)\n sys.exit(1)", "def output(df, path=\"./outputs\", file=\"output\"):\n # if output directory does not already exist, make it\n if not os.path.isdir(path):\n os.makedirs(path)\n\n # check that the user has included file extension, if so, remove it\n if '.csv' in file:\n file = file.replace('.csv', '')\n\n # merge path and file\n full_path = os.path.join(path, f\"{file}.csv\")\n \n while 
True:\n try:\n df.to_csv(full_path, sep='|', index=False)\n # update user\n print(f\"{file} data issues saved to \"\n f\"'{full_path}'.\")\n # if data saved successfully we break the while loop\n break\n except PermissionError:\n # user or another has file open, request to close or rename\n rename = input(f\"'{full_path}' is open, please close and press <Enter>\"\n \" or type a new filename (and press <Enter>).\")\n if rename.strip() == '':\n pass\n elif '.csv' in rename.strip():\n full_path = os.path.join(path, rename) # merge path and file\n else:\n full_path = os.path.join(path, rename+'.csv')", "def save(df, save_preprocessed_dataframe_path, name):\n\n df.to_csv(save_preprocessed_dataframe_path + name + '.csv', index=False)", "def save_csv(data): \n bank_data = data\n\n #Creating headers for the csv file\n header = [\"Lender\", \"Max Loan Amount\", \"Max LTV\", \"Max DTI\", \"Max Credit Score\", \"Interest Rate\"]\n\n #Creating output path of the CSV file\n csvpath = Path(\"save_file.csv\")\n\n #Opening the csv file in csvpath by using the open() method\n with open(csvpath, \"w\", newline='') as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter = \",\")\n csvwriter.writerow(header)\n for row in bank_data:\n csvwriter.writerow(row)\n\n return data", "def save_csv(vals: Vals):\n logging.info('Writing data to csv file')\n with open(PureWindowsPath(os.path.realpath(__file__)).parent / 'results.csv', 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(('X', 'Y'))\n\n for x, y in dict(zip(vals.x, vals.y)).items():\n csvwriter.writerow((x, y))\n\n logging.info('Finished writing')\n messagebox.showinfo('Save to CSV', 'Successfully saved!')", "def save_csv(outputfile):\n with open(outputfile, 'w', newline='') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(DATA_KEYS)\n\n # Add data to csv-file\n for data in data_list:\n writer.writerow(data)", "def _write_stats(self, stat_type, user=None, summ_type=None):\n if stat_type == \"full collection\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n self.summary_file.write(\"Number of unique urls: {u}\\nNumber of unique sites: {s}\\n\".format(u=len(set(self.stat_dict['urls'])), s=len(set(self.stat_dict['sites'])))\n )\n site_cnts = Counter(self.stat_dict['sites']).most_common()\n for site in site_cnts:\n self.summary_file.write(\"{s}: {n}\\n\".format(s=site[0], n=site[1]))\n\n if stat_type == \"token_counts\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n for doc_type in self.stat_dict:\n if user is not None:\n self.summary_file.write(\"\\n{0}, {1}\\n\".format(user, summ_type))\n\n self.summary_file.write(\n \"\\nNumber of {d}s: {p}\\nAverage tokens/{d}: {t}\\nAverage sentences/{d}: {s}\\n\".format(\n d=doc_type, p=len(self.stat_dict[doc_type][0]), t=sum(self.stat_dict[doc_type][1])/len(self.stat_dict[doc_type][1]), s=sum(self.stat_dict[doc_type][0])/len(self.stat_dict[doc_type][0])\n )\n )\n\n self.summary_file.write(\n \"Median tokens/{d}: {p}\\nStandard deviation tokens/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][1]), t=np.std(self.stat_dict[doc_type][1])\n )\n )\n\n self.summary_file.write(\n \"Median sentences/{d}: {p}\\nStandard deviation sentences/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][0]), t=np.std(self.stat_dict[doc_type][0])\n )\n )", "def write_csv(df: pd.DataFrame, outpath) -> None:\n logging.info('Writing detection records to disk')\n outdir = 
os.path.split(outpath)[0]\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n df.to_csv(outpath, index=False)\n logging.info(f'{len(df)} records written to {outpath}')", "def create_campaign_all_stats_csv(stats_file_directory, campaign_name, all_stats_fields,\n campaign_all_stats_data):\n # We build the campaign statistucs file here with the country stats stats\n file_directory = stats_file_directory + '/' + campaign_name.replace(' ', '_') + '_all_stats.csv'\n with open(file_directory, 'w', encoding='UTF-8') as all_stats_csv_file:\n writer = csv.writer(all_stats_csv_file)\n fields = all_stats_fields\n writer = csv.DictWriter(all_stats_csv_file, fieldnames=fields)\n writer.writeheader()\n writer.writerows(campaign_all_stats_data)\n all_stats_csv_file.close()\n return campaign_name.replace(' ', '_') + '_all_stats.csv'", "def write_out(matrix, filename):\n with open(filename, 'w') as csvfile:\n writer = csv.writer(csvfile)\n for r in matrix:\n writer.writerow(r)\n print(filename + ' writen!')", "def write_2D_list(self, list_name, statistics):\n filename = os.getcwd() + list_name + \".csv\"\n print(filename)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, mode='w+', newline='',encoding='utf8') as list_file:\n list_writer = csv.writer(list_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for item in statistics:\n list_writer.writerow(item)", "def save_csv(net, wires, net_id, chip_id, chip):\n with open('output/output.csv', 'w') as file:\n # Write first line\n output = csv.writer(file)\n output.writerow([\"net\", \"wires\"])\n\n # Index and fill the body\n for step in range(len(wires)):\n output.writerow([net[step],wires[step]])\n\n # End of file\n output.writerow([f\"chip_{chip_id}_net_{net_id}\", chip.cost])", "def _save_log(self, save_dir, data):\n date = datetime.datetime.today().strftime('%Y-%m-%d')\n file_dir = os.path.join(save_dir, date + \".csv\")\n with open(file_dir, 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(data)", "def write_stats(self, directory):\n\n target_dir = os.path.join(directory, 'tweet_stats')\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n # general stats\n self.stats_summary.append(\"%-30s\\t%12d\\n\" % ('Number of tweets', len(self)))\n self.stats_summary.append('%-30s\\t%-12s\\t%-12s' % ('Index', 'Type count', 'Token count'))\n\n for k in self.stats:\n k_stats = self.stats[k]\n\n rank = 0\n token_count = 0\n lines = []\n\n # Sort by frequency of words, pairs, triples, urls etc.\n k_stats_sorted = sorted(k_stats.iteritems(), key=operator.itemgetter(1), reverse=True)\n\n for val, card in k_stats_sorted:\n rank += 1\n token_count += card\n lines.append(\"%4d %-60s %5d\" % (rank, val, card))\n\n self.write_file(target_dir, \"%s.txt\" % k, \"\\n\".join(lines))\n\n # update summary with index name and corresponding type and token counts\n self.stats_summary.append('%-30s\\t%12d\\t%12d' % (k, len(k_stats), token_count))\n\n # write summary info\n self.write_file(target_dir, 'general.txt', \"\\n\".join(self.stats_summary))", "def exportToCSV(subdomain,sub,file_name):\n\ttry:\n\t\twith open(file_name,\"w\") as file:\n\t\t\tfnames = ['subdomain', 'saved_date']\n\t\t\twriter = csv.DictWriter(file, fieldnames=fnames)\n\t\t\twriter.writeheader()\n\t\t\tfor _ in sub:\n\t\t\t\twriter.writerow({'subdomain' : _.subdomain, 'saved_date': _.created_date})\n\t\t\tprint(f'[+] {len(sub)} results of {subdomain} exported successfully.\\n[+] Path 
{result_save_dir}/{subdomain}_{datetime.now().date()}.csv')\n\texcept IOError as error:\n\t\tlogger.log(\"ERROR\",\"[+] Fails to open or create file! Please check your permissions\")", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def write_result_to_file(duration_string, statistic):\n\n with open(\"scores.txt\", \"a\") as results_file:\n for player_name in statistic.keys():\n if player_name == 'draw':\n continue\n results_file.write(\"{} won {} number of times\\n\".format(player_name, statistic[player_name]))\n results_file.write(\"draws = {} times\\n\".format(statistic.get(\"draw\", 0)))\n results_file.write(\"{}\\n\".format(duration_string))\n today = date.today()\n results_file.write(\n \"This game was played on {}, {}, {}\\n\\n\".format(today.day, calendar.month_name[today.month], today.year))", "def write_metrics(output_dir, metrics, config, ancestors):\n os.makedirs(output_dir, exist_ok=True)\n\n file_name = \"metrics.csv\"\n file_path = os.path.join(output_dir, file_name)\n\n with open(file_path, \"w\", newline=\"\", encoding=\"utf-8\") as csvfile:\n csv_writer = csv.writer(csvfile)\n for line in metrics.items():\n csv_writer.writerow(line)\n\n record_provenance(file_path, config, ancestors)", "def save_model(model, output_dir, epoch, step, metric, current_score,\n best_score, name=\"model\"):\n assert hasattr(model, \"loss\") and model.loss is not None\n assert hasattr(model, \"optimizer\") and model.optimizer is not None\n\n model.save(os.path.join(output_dir, f\"{name}.h5\"))\n\n file_io.write_csv(\n os.path.join(output_dir, f\"{name}.step\"),\n [epoch, step, metric, current_score, best_score])", "def test01Summarize(self):\n \n summaryFile = re.sub(\"\\.csv\",\"\",self.parsedFile)+\"_summary.csv\"\n if os.path.exists(summaryFile):\n os.remove(summaryFile)\n\n self.bm.create_summarized(self.parsedFile,uniprot=True)\n self.assertTrue(os.path.exists(summaryFile))", "def write_csv(data_frame, file_name):\n data_frame.coalesce(1).write \\\n .option('header', True).mode('overwrite') \\\n .save(f'outputs/{file_name}', format('csv'))", "def writeStatTables(allStats, overviewStats):\n\n # Create all stats \n statFileHandlers = {}\n statFiles = {}\n measures = ['recall', 'specificity', 'FPR', 'FNR', 'PBC', 'precision', 'f-measure']\n\n for measure in measures:\n statFileHandlers[measure] = open(measure + '-all.csv', 'w', newline='')\n statFiles[measure] = csv.writer(statFileHandlers[measure], delimiter=';')\n \n firstRowText = ''\n allMethods = []\n\n for categoryKey, category in allStats.items():\n\n # Check the number of subSubCategories inside the subCategories and issue a warning\n # if the count of subSubCategories for different subCategories does not match\n\n for subCategoryKey, subCategory in category.items():\n for subSubCategoryKey, subSubCategory in subCategory.items():\n if subSubCategoryKey not in allMethods:\n allMethods.append(subSubCategoryKey)\n\n\n firstRowText = [''] + allMethods\n \n for measure in measures:\n 
statFiles[measure].writerow(firstRowText)\n \n\n for categoryKey, category in allStats.items():\n for subCategoryKey, subCategory in category.items():\n\n rows = {'recall': [categoryKey + '/' + subCategoryKey], 'specificity' : [categoryKey + '/' + subCategoryKey], 'FPR' : [categoryKey + '/' + subCategoryKey], 'FNR' : [categoryKey + '/' + subCategoryKey], 'PBC' : [categoryKey + '/' + subCategoryKey], 'precision' : [categoryKey + '/' + subCategoryKey], 'f-measure': [categoryKey + '/' + subCategoryKey]}\n for method in allMethods:\n\n if method in subCategory:\n if isinstance(subCategory[method], (list, tuple, dict)):\n for methodKey, value in subCategory[method].items():\n rows[methodKey] = rows[methodKey] + [float(value)]\n else:\n print('Error')\n else:\n for measure in measures:\n rows[measure] = rows[measure] + [''] # Write empty entry at this column\n\n\n for measure in measures:\n statFiles[measure].writerow(rows[measure])\n\n for statFileHandlerKey, statFileHandler in statFileHandlers.items():\n statFileHandler.close()\n\n # 1) Overview, containing summarised numbers of the entire category, for each method\n # From the overview stats, summarise by method and folder\n overallRawStats = {} # Overall stats, summarised by method, i.e. Kim2015-no-blur\n siteRawStats = {} # Stats for each method, summarised by folder, i.e. Kim2015-no-blur/Egensevej\n\n for siteKey, site in overviewStats.items():\n if categoryKey not in siteRawStats:\n siteRawStats[siteKey] = {}\n\n for sequenceKey, sequence in site.items():\n if len(subCategory.items()) > 1:\n\n for methodKey, method in sequence.items():\n if methodKey not in siteRawStats[siteKey]:\n siteRawStats[siteKey][methodKey] = {}\n \n if methodKey not in overallRawStats:\n overallRawStats[methodKey] = {}\n\n for statKey, stat in method.items():\n if statKey not in siteRawStats[siteKey][methodKey]:\n siteRawStats[siteKey][methodKey][statKey] = float(stat)\n else:\n siteRawStats[siteKey][methodKey][statKey] += float(stat)\n\n\n if statKey not in overallRawStats[methodKey]:\n # Just add it\n overallRawStats[methodKey][statKey] = float(stat)\n else:\n # Otherwise add to list\n overallRawStats[methodKey][statKey] += float(stat)\n \n overallStatHandler = open('overallStats.csv', 'w', newline='')\n overallStatFile = csv.writer(overallStatHandler, delimiter=';') \n siteStatHandler = open('statsPerSite.csv', 'w', newline='')\n siteStatFile = csv.writer(siteStatHandler, delimiter=';')\n\n # Write the header row\n subSubCategoryHeader = [''] + [''] + measures\n overallStatFile.writerow(subSubCategoryHeader)\n siteStatFile.writerow([''] + subSubCategoryHeader)\n\n # Get the recall, specificity and other measures from the summarised numbers and write them to the stat files\n for methodName, rawStats in overallRawStats.items():\n stats = getStats(rawStats)\n\n names = methodName.split('/')\n row = names\n\n for measure in measures:\n row = row + ['{:f}'.format(stats[measure])]\n\n overallStatFile.writerow(row)\n\n\n for siteName, methodStats in siteRawStats.items():\n\n for methodName, rawStats in methodStats.items():\n stats = getStats(rawStats)\n\n row = [siteName] + methodName.split('/')\n\n for measure in measures:\n row = row + ['{:f}'.format(stats[measure])]\n\n siteStatFile.writerow(row)\n \n overallStatHandler.close();\n siteStatHandler.close();\n\n # Convert the commas from \".\" to \",\" to allow processing in Excel\n convertCommaInFile('overallStats.csv')\n convertCommaInFile('statsPerSite.csv')\n\n for measure in measures:\n convertCommaInFile(measure 
+ '-all.csv')\n\n\n return", "def save_fitted_dataframe(comp_data_df, filename):\r\n comp_data_df.to_csv('{}.csv'.format(filename))\r\n return 0" ]
[ "0.67156446", "0.6640688", "0.65309376", "0.65059", "0.6463663", "0.6374038", "0.6361525", "0.632994", "0.63163084", "0.62892014", "0.6272734", "0.62425643", "0.62299234", "0.62213546", "0.6216482", "0.6173342", "0.6157177", "0.6122196", "0.61156815", "0.60866743", "0.6001306", "0.59814596", "0.59759885", "0.5950357", "0.594077", "0.5935059", "0.5926359", "0.5910836", "0.59108216", "0.5897887", "0.58936036", "0.5886743", "0.5879899", "0.58767074", "0.58742374", "0.58731914", "0.5858122", "0.5846668", "0.5838771", "0.5828295", "0.58267325", "0.5825192", "0.5822169", "0.5812701", "0.580716", "0.58066803", "0.5801441", "0.5785045", "0.57802933", "0.57768774", "0.57717276", "0.57708246", "0.57673466", "0.57636106", "0.57545805", "0.5748054", "0.57475483", "0.57436883", "0.5740409", "0.57323635", "0.573075", "0.57202786", "0.5710536", "0.5701514", "0.57004595", "0.56988364", "0.5696266", "0.56907207", "0.56821644", "0.5679812", "0.5668782", "0.5667463", "0.5666848", "0.5657431", "0.56568307", "0.56511474", "0.56394714", "0.56259984", "0.56227887", "0.5621992", "0.56204784", "0.561249", "0.5611523", "0.56090516", "0.560722", "0.5604645", "0.5602541", "0.5599444", "0.55931675", "0.5588068", "0.5587932", "0.55834764", "0.55806524", "0.55747384", "0.557365", "0.55684453", "0.5566533", "0.5558946", "0.5553888", "0.55485106" ]
0.78815484
0
Opens all JPEG+RAW images in the specified experiment directory and returns as a map of
def get_raw_image_paths_for_experiment(local_sync_directory_path, experiment_directory):
    raw_images_directory = os.path.join(local_sync_directory_path, experiment_directory)
    raw_image_paths = get_files_with_extension(raw_images_directory, ".jpeg")
    return pd.Series(raw_image_paths)
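Note: the document above calls a helper, get_files_with_extension, that is not defined anywhere in this row. A minimal sketch of such a helper (an assumption for illustration only, not part of the dataset) could be:

import os

def get_files_with_extension(directory, extension):
    # Return sorted full paths of files in `directory` whose names end
    # with `extension` (e.g. ".jpeg"); sorting keeps the ordering stable.
    return sorted(
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.lower().endswith(extension)
    )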
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_sample_images():\n # Try to import imread from scipy. We do this lazily here to prevent\n # this module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n raise ImportError(\"The Python Imaging Library (PIL) \"\n \"is required to load data from jpeg files\")\n ROOT_Dir = os.getcwd()\n module_path = os.path.join(ROOT_Dir, \"images\")\n with open(os.path.join(module_path, 'README.txt')) as f:\n descr = f.read()\n filenames = [os.path.join(module_path, filename)\n for filename in os.listdir(module_path)\n if filename.endswith(\".jpg\")]\n # Load image data for each image in the source folder.\n images = [imread(filename) for filename in filenames]\n\n return Bunch(images=images,\n filenames=filenames,\n DESCR=descr)", "def get_images_and_labels(tampered_path, authentic_path):\n tampered_dir = tampered_path\n authentic_dir = authentic_path\n images = {}\n for im in glob.glob(authentic_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 0\n for im in glob.glob(tampered_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 1\n return images", "def readImages(image_dir):\n images = {}\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' + ext) for ext in extensions]\n image_files = sorted(reduce(list.__add__, map(glob, search_paths)))\n for f in image_files:\n images[f[f.rfind(\"/\") + 1:f.rfind(\".\")]] = cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR)\n\n return images", "def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles", "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def load_test_data(image_path):\n raw = []\n image_filename = dict()\n count = 0\n for filename in glob.glob(image_path):\n name = os.path.basename(filename)[:-4]\n try:\n im = Image.open(filename)\n im = im.convert('L')\n im = im.resize((img_rows, img_cols))\n raw.append(np.array(im))\n image_filename[count] = name\n count += 1\n im.close()\n except IOError:\n print('Error loading image ', filename)\n return [raw, image_filename]", "def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]", "def load_test_images(images):\n loaded = {}\n for description, _ in images.items():\n loaded[description] = load_from_netcdf(description)\n return loaded", "def 
read_local(path):\n files = os.listdir(path)\n imgs = []\n for f in files:\n if f.endswith(\".tiff\") or f.endswith(\".tif\"):\n img = Image.open(os.path.join(path, f))\n imgs.append(np.array(img))\n return imgs", "def read_x_data(data_dir):\n files = glob.glob(os.path.join(data_dir, '*.jpg'))\n return [(os.path.basename(file), io.imread(file)) for file in files]", "def mock_raw_data(tmp_dir, raw_dim=1024, num_channels=3, num_images=1):\n\n tf.gfile.MakeDirs(tmp_dir)\n\n for image_id in range(num_images):\n\n raw_image_path = os.path.join(tmp_dir, \"%s.jpg\" % image_id)\n\n mock_raw_image(x_dim=raw_dim, y_dim=raw_dim,\n num_channels=num_channels,\n output_path=raw_image_path)", "def tile_dict(path):\n dic = {}\n for image in os.listdir(path):\n if image.split('.')[-1] == 'png':\n try:\n im = Image.open(image)\n except:\n print \"image file %s cannot open\" % image\n continue\n if im.mode != 'RGB':\n im = im.convert('RGB')\n dic[image] = average_image(im)\n return dic", "def get_images(directory=None):\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def read_data(case_dir):\n dict_images = dict()\n list_files = ['MR_512.nii.gz', 'landmarks_512.csv', ]\n # In fact, there is no Mask during inference, so we cannot load it.\n\n for file_name in list_files:\n file_path = case_dir + '/' + file_name\n assert os.path.exists(file_path), case_dir + ' does not exist!'\n\n if file_name.split('.')[-1] == 'csv':\n landmarks = pd.read_csv(file_path)\n dict_images['list_landmarks'] = landmark_extractor(landmarks)\n elif file_name.split('.')[0].split('_')[0] == 'MR':\n dict_images['MR'] = sitk.ReadImage(file_path, sitk.sitkFloat32)\n dict_images['MR'] = sitk.GetArrayFromImage(dict_images['MR'])[np.newaxis, :, :, :]\n elif file_name.split('.')[0].split('_')[0] == 'Mask':\n dict_images['Mask'] = sitk.ReadImage(file_path, sitk.sitkInt16)\n dict_images['Mask'] = sitk.GetArrayFromImage(dict_images['Mask'])[np.newaxis, :, :, :]\n\n return dict_images", "def load_images(symbol_dict):\n \n args = DeepScribe.get_command_line_args()\n\n if args.symbol is None:\n symb_query = \"*\"\n else:\n symb_query = unicodedata.normalize('NFC', args.symbol)\n \n query = args.directory + \"/\" + symb_query + \"_*.jpg\"\n count = 0\n\n for fn in tqdm(iglob(query), desc='filenames'):\n # find first occurence of \"_\" after directory name, which marks the start of the uuid\n fn = unicodedata.normalize('NFC', fn)\n separator_idx = fn.find(\"_\", len(args.directory)+1)\n extension_idx = fn.rfind(\".jpg\")\n name = fn[len(args.directory)+1 : separator_idx]\n name = name.upper().strip(' »«')\n uuid = fn[separator_idx+1 : extension_idx]\n\n # not using cv2.imread() in order to read unicode filenames\n img = cv2.imdecode(np.fromfile(fn, dtype=np.uint8),\n cv2.IMREAD_UNCHANGED)\n symb_img = Symbol_Image(name, uuid, img)\n\n if name not in excluded_readings:\n if name in symbol_dict:\n symbol_dict[name].append(symb_img)\n else:\n symbol_dict[name] = [symb_img]\n count += 1\n\n if args.limit != 'max':\n if count >= args.limit:\n break", "def get_imgs(path):\n imlist = {}\n for each in 
glob(path + \"*\"):\n word = each.split(\"/\")[-1]\n imlist[word] = []\n for imagefile in glob(path+word+\"/*\"):\n im = cv2.imread(imagefile, 0)\n imlist[word].append(im)\n\n return imlist", "def getimgs():", "def get_images(directory=None):\r\n \r\n if directory == None:\r\n directory = os.getcwd() # Use working directory if unspecified\r\n \r\n image_list = [] # Initialize aggregaotrs\r\n file_list = []\r\n \r\n directory_list = os.listdir(directory) # Get list of files\r\n for entry in directory_list:\r\n if len(file_list)<2:\r\n absolute_filename = os.path.join(directory, entry)\r\n try:\r\n image = PIL.Image.open(absolute_filename)\r\n file_list += [entry]\r\n image_list += [image]\r\n except IOError:\r\n pass # do nothing with errors tying to open non-images\r\n return image_list, file_list", "def read_image():\n images = []\n for hand in os.listdir('images'):\n img = cv2.imread(os.path.join('images', hand))\n if img is not None:\n images.append(img)\n return images", "def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def readImages(imgFolder='img/'):\n #Each image in images is a numpy array of shape 192x168(x1) (heightxwidth)\n #images datatype is a regular numpy list\n filenames = os.listdir(imgFolder)\n if imgFolder == 'img/':\n images = [imageio.imread('img/'+fn+'/image0.jpg')[::,::].astype(np.float32)/255. for fn in filenames]#glob.glob(imgFolder+'*.jpg')]\n else:\n images = [imageio.imread(imgFolder+fn)[::,::].astype(np.float32)/255. for fn in filenames]\n return images", "def read_images(folder):\n distinct_frames = DistinctFrames()\n\n for file in sorted(sorted(os.listdir(folder)),\n key=len): # sorting files on basis of 1) length and 2) numerical order\n '''\n Sorting is done 2 times because\n if files in the folder are\n 1. image100.pkl\n 2. image22.pkl\n 3. image21.pkl\n firstly sort them to image100.pkl,image21.pkl,image22.pkl then according to length to image21.pkl,image22.pkl,image100.pkl\n '''\n try:\n img_obj = load_from_memory(file, folder)\n time_stamp = img_obj.get_time()\n distinct_frames.add_img_obj(img_obj)\n print(\"Reading image ..\" + str(time_stamp) + \" from \" + folder) # for debug purpose\n except:\n # exception will occur for files like .DS_Store and jpg directory\n continue\n\n if distinct_frames.no_of_frames() != 0:\n distinct_frames.calculate_time()\n\n return distinct_frames", "def load_images_of_experiment(experiment, data_type, as_dict=False):\n if type(data_type) != list:\n data_type = [data_type]\n\n image_data_paths = []\n for experiment_step in experiment:\n image_data_paths.append([path for path in experiment_step for i in data_type if i in path])\n\n images_dict = load_data_from_list_of_paths(image_data_paths)\n\n if as_dict:\n return images_dict\n else:\n return convert_dict_to_list(images_dict)", "def readImages(image_dir):\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' 
+ ext) for ext in extensions]\n image_files = sorted(sum(map(glob, search_paths), []))\n images = [cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR) for f in image_files]\n\n bad_read = any([img is None for img in images])\n if bad_read:\n raise RuntimeError(\n \"Reading one or more files in {} failed - aborting.\"\n .format(image_dir))\n\n return images", "def loadimages(root):\n imgs = []\n\n def add_json_files(path,):\n for imgpath in glob.glob(path+\"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('png',\"json\")))\n for imgpath in glob.glob(path+\"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('jpg',\"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path) \n if os.path.isdir(os.path.join(path,o))]\n if len(folders)>0:\n for path_entry in folders: \n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... : {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n # all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n # test_files = [all_files[idx] for x in np.random.choice(len(all_files), 200, replace=False)]\n # for filepath in test_files:\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def get_images(path, ext=\".jpg\"):\n return get_files(path, ext)", "def get_roi_information(storage_directory):\n exp_path_head = storage_directory\n\n # reformat path for mac with local access\n # TODO: might need to adapt this when data is shared via DropBox\n temp = 
exp_path_head.split('/')\n temp[1] = 'Volumes'\n exp_path_head = '/'.join(temp)\n\n # Find experiment dir in storage_directory\n exp_path_files = os.listdir(exp_path_head)\n exp_folder_list = [i for i in exp_path_files if 'ophys_experiment' in i]\n if len(exp_folder_list) > 1:\n raise Exception('Multiple experiment folders in ' + exp_path_head)\n else:\n exp_folder = exp_folder_list[0]\n\n # Find file by suffix\n processed_path = os.path.join(exp_path_head, exp_folder)\n for fname in os.listdir(processed_path):\n if fname.endswith('input_extract_traces.json'):\n jsonpath = os.path.join(processed_path, fname)\n with open(jsonpath, 'r') as f:\n jin = json.load(f)\n f.close()\n break\n\n # Assemble DataFrame.\n roi_locations = pd.DataFrame.from_records(\n data=jin['rois'],\n columns=['id', 'x', 'y', 'width', 'height', 'valid', 'mask'],\n )\n roi_locations['session_id'] = int(\n exp_path_head.split('/')[-2].split('_')[-1]\n )\n\n return roi_locations", "def get_experiment_frames(experiments, datadir=None):\n import pandas as pd\n\n exp_frames = dict()\n\n if not datadir:\n datadir = os.getcwd()\n\n print 'reading profiles in %s' % datadir\n\n for exp in experiments:\n print \" - %s\" % exp\n exp_frames[exp] = list()\n\n for sid, label in experiments[exp]:\n print \" - %s\" % sid\n \n import glob\n for prof in glob.glob (\"%s/%s-pilot.*.prof\" % (datadir, sid)):\n print \" - %s\" % prof\n frame = pd.read_csv(prof)\n exp_frames[exp].append ([frame, label])\n \n return exp_frames", "def read_images(imagedir, size, ncores=mp.cpu_count()):\n _f = functools.partial(_image_worker, size=size)\n with mp.Pool(ncores) as pool:\n ret = pool.map(_f, get_files(imagedir))\n return {k: v for k,v in ret if v is not None}", "def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images", "def process_images(image_folder: Path) -> List[Dict]:\n images = []\n files = image_folder.glob(\"*.jpg\")\n\n for file_path in files:\n file_name = file_path.name\n file_id = file_name.split(\".jpg\")[0]\n file_id = file_id.split(\"in\")[-1]\n file_id = int(file_id)\n file_id = f\"{file_path.parent.parent.name}_{str(file_id)}\"\n\n width, height = imagesize.get(str(file_path))\n\n image_data = {\"id\": file_id,\n \"width\": width,\n \"height\": height,\n \"filename\": str(file_path)}\n images.append(image_data)\n\n return images", "def list_images(bin_lid):\n bin_url = DATA_NAMESPACE + bin_lid + '.json'\n logging.info('listing images for %s' % bin_lid)\n ds = json.loads(urllib.urlopen(bin_url).read())\n for d in ds:\n yield d['imagename']", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n existing_dirs = [os.path.basename(dir) for dir in os.listdir(FLAGS.output_dir)]\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.JPEG')):\n with tf.gfile.Open(filepath, 'rb') as f:\n image = np.array(Image.open(f).resize([FLAGS.image_height, FLAGS.image_width]).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n if os.path.basename(os.path.normpath(input_dir))=='*':\n head, tail = os.path.split(filepath)\n dirname=os.path.basename(head)\n if dirname in existing_dirs:\n continue\n filename = os.path.join(dirname, tail)\n else:\n filename = os.path.basename(filepath)\n filenames.append(filename)\n idx += 1\n if idx == batch_size:\n 
yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def __init__(self, data_dir, file_prefix, num_images):\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n print('found %d images' % len(self.files))", "def read_image_data(self):\n\n for sequence_name in self.sequence_name_list:\n sequence = self.sequences[sequence_name]\n for image_id in sequence.image_id_list:\n sequence.image_dict[image_id].image_path = '{}{}/{}'.format(self.root_dir, self.name, sequence.image_dict[image_id].filename)", "def getGrouppedRawImages():\n imagesGlob = ['**/*_timestamped.jpg', '**/*_timestamped.JPG']\n images = func.reduce(operator.add, [[path for path in path.Path(\n '.').glob(glob)] for glob in imagesGlob], [])\n labelled = sorted([{\n 'label': image.parent.parent.name,\n 'time': image.parent.name,\n 'path': image\n } for image in images], key=lambda label: label['label'])\n return iter.groupby(labelled, key=lambda label: label['label'])", "def environmentImages(dirPath):\n images = []\n for f in os.listdir(dirPath):\n if os.path.isfile(os.path.join(dirPath, f)):\n name, ext = os.path.splitext(f)\n if ext.lower().replace(\".\", \"\") in [\"hdr\", \"exr\", \"rad\", \"tif\", \"tiff\"]:\n images.append(f)\n return sorted(images)", "def create_image_lists(image_dir):\n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in os.walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n print('in sub loop')\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(image_dir)\n print(\"Looking for images in '\" + image_dir + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(glob.glob(file_glob))\n if not file_list:\n print('No files found')\n continue\n if len(file_list) < 20:\n print('WARNING: Folder has less than 20 images, which may cause issues.')\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n testing_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. 
For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n testing_images.append(base_name)\n return testing_images", "def get_images(self, file_path: str) -> Iterable[Image]:\n return []", "def get_imlist(path):\n\treturn [os.path.join( path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def load_isolated_images(Args):\n # load first galaxy images\n name = 'first_gal_band_wldeb_noise.fits'\n filename = os.path.join(out_dir, Args.model, name)\n Y1 = load_images(filename, ['i'], Args)\n # load second galaxy images\n name = 'second_gal_band_wldeb_noise.fits'\n filename = os.path.join(out_dir, Args.model, name)\n Y2 = load_images(filename, ['i'], Args)\n Y = {'Y1': Y1,\n 'Y2': Y2}\n return Y", "def test_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TEST_FILES, 10000))", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 1.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X_test = []\n y_test = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y_test.append(label)\n X_test.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X_test,y_test", "def get_images(fish):\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images", "def image_iter() -> iter:\r\n return ('Images/' + image for image in IMAGES)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def import_images(folder_path, alphabet_list):\n reference_images = {}\n for i in alphabet_list[:-1] + ['space']:\n img = np.array(Image.open(folder_path + f'/{i}.png')).astype('int')\n 
reference_images[i] = img\n reference_images[' '] = reference_images.pop('space')\n return reference_images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def get_imlist(path):\n return [\n os.path.join(path, f) for f in os.listdir(path) if f.endswith('.bmp')\n ]", "def get_all_images_from_filesystem():\r\n\r\n logging.debug('get_all_images_from_filesystem()')\r\n\r\n dir_path = os.path.join(os.environ['TEMP'],'WarietyWallpaperImages')\r\n all_full_image_paths = []\r\n for my_file in os.listdir(dir_path):\r\n if os.path.isfile(os.path.join(dir_path, my_file)):\r\n all_full_image_paths.append(os.path.join(dir_path, my_file))\r\n return all_full_image_paths", "def read_files(self):\n files = []\n # if this is test folder then there are no labels\n if 'test' in self.list_path:\n for item in self.img_list:\n image_path = item\n name = os.path.splitext(os.path.basename(image_path[0]))[0]\n files.append({\n \"img\": image_path[0],\n \"name\": name,\n })\n else:\n for item in self.img_list:\n image_path, label_path = item\n name = os.path.splitext(os.path.basename(label_path))[0]\n files.append({\n \"img\": image_path,\n \"label\": label_path,\n \"name\": name,\n \"weight\": 1\n })\n return files", "def readImages(respository,*rescale):\n record = []\n onlyfiles = [f for f in listdir(respository) if isfile(join(respository, f))]\n for image in onlyfiles:\n record = record+[readImage(join(respository, image),[0,1,2],rescale)]\n return record\n pass", "def __init__(self, data_dir, file_prefix, num_images):\n print(file_prefix)\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n print('found %d images' % len(self.files))", "def get_input_files():\n\n raw_list = abspath(get('input_files'))\n valid_types = ['image/jpeg', 'image/tiff']\n images = [x for x in raw_list if mimetypes.guess_type(x)[0] in valid_types]\n print('* Input images: {}'.format(len(images)))\n return images", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n 
files.append(getImage(file))\n\n return files", "def extract(directory):\n global usersDict\n images = []\n\n for (dirpath, dirnames, filenames) in walk(directory):\n if not filenames:\n continue\n for file in filenames:\n img = Image(dirpath, file)\n images.append(img)\n # This will utilized all cores, good for single machine / VM, it is not a distributed solution\n pool = Pool(4, initializer, ())\n\n pool.map(model_processing, images)\n\n print('FINISHHH----', usersDict)\n for user in usersDict:\n print('DICTTT----', user.images)\n user.save()", "def get_imlist(path):\n return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def __init__(self, data_dir, file_prefix, num_images):\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n self.images = []\n self.display_match = False\n self.useBlending = False\n print('found %d images' % len(self.files))", "def load_images(self, target = \"standard\", path=OMNIGLOT_DATAPATH):\n X = []\n Y = []\n folderName = {}\n if target == \"standard\":\n trainFolders = [\"images_background\"]\n testFolders = [\"images_evaluation\"]\n elif target == \"minimal\":\n trainFolders = [\"images_background_small1\", \"images_background_small2\"]\n testFolders = [\"images_evaluation\"]\n \n if self.train:\n for trainFolder in trainFolders:\n folderPath = os.path.join(path, trainFolder)\n imgAllCount = 0 # this is counted for the whole images in all alphabet\n chaAllCount = 0 # this is counted for the whole characters in all alphabet\n\n for alphabet in sorted(os.listdir(folderPath)):\n alphabetPath = os.path.join(folderPath, alphabet)\n folderName[alphabet] = {'totalChar': 0, 'charIndex': [], 'totalImg': 0, 'imgIndex': []}\n \n imgAlphabetCount = 0 # this is counted for the number of images in this alphabet\n chaAlphabetCount = 0 # this is counted for the number of character in this alphabet\n\n folderName[alphabet]['charIndex'].append(chaAllCount)\n folderName[alphabet]['imgIndex'].append(imgAllCount)\n \n for letter in sorted(os.listdir(alphabetPath)):\n letterPath = os.path.join(alphabetPath, letter)\n \n for letterImage in os.listdir(letterPath):\n imagePath = os.path.join(letterPath, letterImage)\n image = mpimg.imread(imagePath)\n X.append(image)\n Y.append(chaAllCount)\n \n imgAlphabetCount += 1\n imgAllCount += 1\n\n chaAlphabetCount += 1\n chaAllCount += 1\n \n folderName[alphabet]['totalChar'] = chaAlphabetCount\n folderName[alphabet]['totalImg'] = imgAlphabetCount\n folderName[alphabet]['charIndex'].append(chaAllCount-1)\n folderName[alphabet]['imgIndex'].append(imgAllCount-1)\n \n X = np.stack(X) \n X = X.reshape(-1, IMAGES_PER_CHARACTER, X.shape[1], X.shape[2])\n return X, np.stack(Y), folderName", "def _getImagesFromDirectory(self, directoryPath):\n files = [f for f in listdir(directoryPath)\n if isfile(join(directoryPath, f))]\n for filePath in files:\n self._imageDictionary[filePath] = image.load(\n self._formatPath(directoryPath, filePath))", "def populate_image_lists():\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_a.append(path.path)\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_b\")) as filepaths:\r\n for path in filepaths:\r\n extension = 
os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_b.append(path.path)", "def test_read(self):\n for root, dirs, files in os.walk(os.path.join(self.test_dir, 'files')):\n for filename in files:\n if filename.endswith('.bin'):\n d = Dataset(os.path.join(root, filename))\n data = d.as_dict()\n for freq_dict in data['frequencies']:\n x = freq_dict['easting']\n y = freq_dict['northing']\n image = freq_dict['intensity']\n self.assertIsInstance(x, np.ndarray)\n self.assertIsInstance(y, np.ndarray)\n self.assertIsInstance(image, np.ndarray)", "def get_images(path_list):\n images = []\n labels = []\n names = []\n i = 0\n for path in path_list:\n for fruit_dir_path in glob.glob(path):\n fruit_label = fruit_dir_path.split(\"/\")[-1]\n for image_path in glob.glob(os.path.join(fruit_dir_path, \"*.jpg\")):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n\n image = cv2.resize(image, (45, 45))\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n images.append(image)\n names.append(fruit_label)\n labels.append(i)\n i += 1\n\n images = np.array(images)\n print(images.shape)\n # add a new dimension here\n with np.nditer(images, op_flags=['readwrite']) as it:\n for x in it:\n x = np.expand_dims(x, axis=0)\n labels = np.array(labels)\n return images, labels, i", "def read_base_folder(base_path: str) -> dict:\n\tfour_x_images: dict = {}\n\tfor file in listdir(four_x_file_path):\n\t\tfile_extension = os.path.splitext(file)[1]\n\t\tif file_extension in image_extensions:\n\t\t\tfour_x_images[file] = join(base_path, file)\n\treturn four_x_images", "def read_raw_img(kind):\n\n mypath = RAW_DIR_PATH[kind]\n files = [f for f in listdir(mypath) if isfile(join(mypath, f))\n and f[0] != '.']\n random.shuffle(files)\n\n if kind == 'bad':\n files *= 3\n\n for img in files:\n yield Image.open(mypath + img)", "def get_images(stage=0):\n return get_files(stage)[0]", "def save_processed_images(exp_dir, img_dict):\n # save them into a directory called \"processed\"\n img_fname = os.path.join(exp_dir, str(experiment) + '_processed.jpg')", "def global_metadata(paths):\n\n # Weakly group images to partition image set size- crucial optimization step\n if os.path.exists(paths.image_preprocess):\n clumped_paths = json.loads(open(paths.image_preprocess).read())\n else:\n clumped_paths = network.alpha_categorize(paths)\n print(\"Hashed source images\")\n\n with open(paths.image_preprocess, 'w') as json_file:\n json.dump(clumped_paths, json_file)\n\n # Combinatorial image grouping to graph\n image_graph = network.load_graph(paths.image_network_path)\n\n total = len(list(chain(*clumped_paths.values())))\n counter = 0.\n\n for image_paths in clumped_paths.values():\n counter += len(image_paths)\n print(str(int(counter / float(total) * 100)) + \"% complete\")\n\n if len(image_paths) > 1:\n image_grouping = images.load_paths(paths.default_patches, image_paths)\n image_graph = metadata.network.network_images(\n image_grouping, threshold=0, network=image_graph)\n else:\n image_graph.add_node(image_paths[0])\n\n metadata.network.save_graph(paths.image_network_path, image_graph)\n print(\"Updated image graph.\")\n\n # Create informational json files for templates and files\n templates.build(paths, image_graph)\n mappings.build(paths, image_graph)\n print(\"Created JSON metadata files.\")", "def images_mapped(self):\n try:\n return dict([x for x in enumerate(self.images())])\n except:\n return None", "def load_jpgs(path, size=(224, 224)):\n fnames = os.listdir(path)\n imgs = []\n i = 0\n if 
i<1500:\n for f in fnames:\n f= path + '/'+f\n if (os.path.isfile(f) and os.path.getsize(f) > 0):\n if not re.match('.+(jpg|jpeg|JPEG|JPG)', f):\n continue\n try:\n #image = Image.open(os.path.join(path, f))\n image = Image.open(f)\n except OSError:\n continue # ignore corrupt files\n data = list(image.getdata())\n im = Image.new(image.mode, image.size)\n im.putdata(data)\n if im.mode != 'RGB':\n im = im.convert('RGB')\n im = crop_center_or_reshape(im, size)\n img = 2 * (np.asarray(im) / 255) - 1\n #img= np.asarray(im)\n imgs.append(img)\n i= i+1\n\n return np.array(imgs)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n if(FLAGS.checkpoint_file_name==\"vgg_16.ckpt\")or(FLAGS.checkpoint_file_name==\"vgg_19.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_50.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_101.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_152.ckpt\"):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float)\n images[idx, :, :, :] = image\n else:\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape=[2000,299,299,3]):\n \n filenames = []\n idx = 0\n filepaths=tf.gfile.Glob(os.path.join('./', '*.png'))\n print(len(filepaths))\n print(filepaths)\n batch_shape[0]=len(filepaths)\n batch_size = batch_shape[0]\n print(batch_shape)\n print(\"ZZZ\")\n images = np.zeros(batch_shape, dtype=np.float32)\n \n for filepath in filepaths:\n# with tf.gfile.Open(filepath) as f:\n# image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n image = np.array(scipy.misc.imresize(scipy.misc.imread(filepath),(299,299)),dtype=np.float32)/255\n \n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image -0.5 #* 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n return filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n return filenames, images", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, 
\"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)", "def get_imgs(paths_list: list) -> list:\n \n imgs_list = [Image.open(project_path + data_path + paths_list[i]) for i in range(len(paths_list))]\n \n return imgs_list", "def read_images(path, sz=None):\n c = 0\n X,y = [], []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n try:\n im = Image.open(os.path.join(subject_path, filename))\n im = im.convert(\"L\")\n # resize to given size (if given)\n if (sz is not None):\n im = im.resize(sz, Image.ANTIALIAS)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n except IOError, (errno, strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise\n c = c+1\n return [X,y]", "def get_frames_for_sample(sample):\n path = os.path.join('data', sample[0])\n filename = sample[1]\n images = sorted(glob.glob(os.path.join(path, filename + '*jpg')))\n return images", "def read_images(fs, img_path_batch, mode=\"rb\"):\n result = []\n logging.info(\"Start to read images at {}\".format(socket.gethostname()))\n for (label, img_path) in img_path_batch:\n img = read_image(fs, img_path, mode)\n result.append((label, img))\n logging.info(\"Finish the reading of {} images on {}\".format(\n len(result), socket.gethostname()))\n return result", "def load_images(tags_pict):\n img_data_list = []\n for p in tags_pict.index :\n img_path = tags_pict.full_path[p]\n img = load_img(img_path, target_size= inputShape)\n x = img_to_array(img)\n x = np.expand_dims(img, axis=0)\n # pre-process the image using the appropriate function based on the\n # model that has been loaded (i.e., mean subtraction, scaling, etc.)\n x = preprocess_input(x)\n img_data_list.append(x)\n img_data = np.array(img_data_list)\n img_data=np.rollaxis(img_data,1,0)\n img_data=img_data[0]\n return(img_data)", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def decode(self):\n # Extract all the experiments\n\n # Map of imageset/scan pairs\n imagesets = {}\n\n # For every experiment, use the given input to create\n # a sensible experiment.\n el = ExperimentList()\n for eobj in self._obj[\"experiment\"]:\n\n # Get the models\n identifier = eobj.get(\"identifier\", \"\")\n beam = self._lookup_model(\"beam\", eobj)\n detector = self._lookup_model(\"detector\", eobj)\n goniometer = self._lookup_model(\"goniometer\", eobj)\n scan = self._lookup_model(\"scan\", eobj)\n crystal = self._lookup_model(\"crystal\", eobj)\n profile = self._lookup_model(\"profile\", eobj)\n scaling_model = self._lookup_model(\"scaling_model\", eobj)\n\n key = (eobj.get(\"imageset\"), eobj.get(\"scan\"))\n\n imageset = None\n try:\n imageset = imagesets[key] # type: ImageSet\n except KeyError:\n # This imageset hasn't been loaded yet - create it\n imageset_data = 
self._lookup_model(\"imageset\", eobj)\n\n # Create the imageset from the input data\n if imageset_data is not None:\n if \"params\" in imageset_data:\n format_kwargs = imageset_data[\"params\"]\n else:\n format_kwargs = {}\n\n # Load the external lookup data\n mask_filename, mask = self._load_pickle_path(imageset_data, \"mask\")\n gain_filename, gain = self._load_pickle_path(imageset_data, \"gain\")\n pedestal_filename, pedestal = self._load_pickle_path(\n imageset_data, \"pedestal\"\n )\n dx_filename, dx = self._load_pickle_path(imageset_data, \"dx\")\n dy_filename, dy = self._load_pickle_path(imageset_data, \"dy\")\n\n if imageset_data[\"__id__\"] == \"ImageSet\":\n imageset = self._make_stills(\n imageset_data, format_kwargs=format_kwargs\n )\n elif imageset_data[\"__id__\"] == \"ImageGrid\":\n imageset = self._make_grid(\n imageset_data, format_kwargs=format_kwargs\n )\n elif (\n imageset_data[\"__id__\"] == \"ImageSequence\"\n or imageset_data[\"__id__\"] == \"ImageSweep\"\n ):\n imageset = self._make_sequence(\n imageset_data,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n format_kwargs=format_kwargs,\n )\n elif imageset_data[\"__id__\"] == \"MemImageSet\":\n imageset = self._make_mem_imageset(imageset_data)\n else:\n raise RuntimeError(\"Unknown imageset type\")\n\n if imageset is not None:\n # Set the external lookup\n if mask is None:\n mask = ImageBool()\n else:\n mask = ImageBool(mask)\n if gain is None:\n gain = ImageDouble()\n else:\n gain = ImageDouble(gain)\n if pedestal is None:\n pedestal = ImageDouble()\n else:\n pedestal = ImageDouble(pedestal)\n if dx is None:\n dx = ImageDouble()\n else:\n dx = ImageDouble(dx)\n if dy is None:\n dy = ImageDouble()\n else:\n dy = ImageDouble(dy)\n\n if not imageset.external_lookup.mask.data.empty():\n if not mask.empty():\n mask = tuple(m.data() for m in mask)\n for m1, m2 in zip(\n mask, imageset.external_lookup.mask.data\n ):\n m1 &= m2.data()\n imageset.external_lookup.mask.data = ImageBool(mask)\n else:\n imageset.external_lookup.mask.data = mask\n imageset.external_lookup.mask.filename = mask_filename\n imageset.external_lookup.gain.data = gain\n imageset.external_lookup.gain.filename = gain_filename\n imageset.external_lookup.pedestal.data = pedestal\n imageset.external_lookup.pedestal.filename = pedestal_filename\n imageset.external_lookup.dx.data = dx\n imageset.external_lookup.dx.filename = dx_filename\n imageset.external_lookup.dy.data = dy\n imageset.external_lookup.dy.filename = dy_filename\n\n # Update the imageset models\n if isinstance(imageset, ImageSequence):\n imageset.set_beam(beam)\n imageset.set_detector(detector)\n imageset.set_goniometer(goniometer)\n imageset.set_scan(scan)\n elif isinstance(imageset, (ImageSet, ImageGrid)):\n for i in range(len(imageset)):\n imageset.set_beam(beam, i)\n imageset.set_detector(detector, i)\n imageset.set_goniometer(goniometer, i)\n imageset.set_scan(scan, i)\n\n imageset.update_detector_px_mm_data()\n\n # Add the imageset to the dict - even if empty - as this will\n # prevent a duplicated attempt at reconstruction\n imagesets[key] = imageset\n\n # Append the experiment\n el.append(\n Experiment(\n imageset=imageset,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n crystal=crystal,\n profile=profile,\n scaling_model=scaling_model,\n identifier=identifier,\n )\n )\n\n # Return the experiment list\n return el", "def _load_images(self, resolutions=None):\n images = {}\n\n for block, url in self.image_declarations:\n file_name = 
normalize_filename(url)\n if file_name not in images:\n img_resolutions = {}\n img = Image.open(file_name)\n img_resolutions[1] = img\n width, height = img.size\n\n if resolutions:\n for resolution in resolutions:\n # Get the correct filename for this resolution\n if resolution != 1:\n root, ext = os.path.splitext(file_name)\n res_file_name = '{root}-{resolution}x{ext}'.format(\n root=root, resolution=resolution, ext=ext)\n\n img = Image.open(res_file_name)\n if img.size[0] / resolution != width:\n raise ValueError('Invalid width for {0}'.format(\n res_file_name))\n if img.size[1] / resolution != height:\n raise ValueError('Invalid height for {0}'.format(\n res_file_name))\n img_resolutions[resolution] = img\n\n images[file_name] = img_resolutions\n\n return images", "def get_images(self):\r\n if self.images is None:\r\n self.images = {}\r\n for name, img_num in self.images.iteritems():\r\n if isinstance(img_num, int):\r\n yield (name, img_num)", "def list_images():\n return json_response(list_manifests())", "def load_groundtruths(folder_path, num_images):\n imgs = []\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n # See if it is better to use dtype = int\n hot_img = convert_image_to_hot(img)\n imgs.append(hot_img)\n else:\n print('File ' + image_path + ' does not exist')\n #imgs = np.around(imgs) # Uncomment if we want to round values.\n imgs_array = np.asarray(imgs)\n return imgs_array", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')", "def imagesToJSON(folder_name):\n path = os.path.join('tests', 'data', 'images', folder_name)\n file_names = [file_name for file_name in os.listdir(path) if not file_name.startswith('.')]\n frames = []\n for name in file_names:\n image = Image.open(os.path.join(path, name))\n arr = []\n width, height = image.size\n for y in xrange(height):\n row = []\n for x in xrange(width):\n row.append(1 if not image.getpixel((x,y)) else 0)\n arr.append(row)\n frames.append(arr)\n writeFrames(frames, 'actual', folder_name)", "def load_images_test():\n\n path = os.path.join('./test','*.jpg')\n files = glob.glob(path)\n\n x_test = []\n x_test_id = []\n x_test_shape = []\n pool = multiprocessing.Pool(processes=8)\n for fl in files:\n print(fl)\n flbase = os.path.basename(fl)\n img = cv2.imread(fl, cv2.IMREAD_COLOR)\n img = cv2.imread(fl, cv2.IMREAD_COLOR)\n result_list = pool.map(process_image, [fl])\n x_test.append(result_list[0])\n x_test_id.append(flbase)\n #cv2.imshow(\"dst\", dst2)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n pool.close()\n return x_test, x_test_id", "def read_images(path, image_size=None):\n c = 0\n X = []\n y = []\n folder_names = []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n folder_names.append(subdirname)\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n #try:\n im = cv2.imread(os.path.join(subject_path, filename), 
cv2.IMREAD_GRAYSCALE)\n # resize to given size (if given)\n if (image_size is not None):\n im = cv2.resize(im, image_size)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n #except IOError, (errno, strerror):\n # print \"I/O error({0}): {1}\".format(errno, strerror)\n # except:\n # print \"Unexpected error:\", sys.exc_info()[0]\n # raise\n c = c+1\n return [X,y,folder_names]", "def load_images_from_directory(input_dir, batch_shape):\n def input_filenames(input_dir):\n all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n all_files.sort()\n return all_files\n\n\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n\n for filepath in input_filenames(input_dir):\n with tf.gfile.Open(filepath, mode='rb') as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n\n # This is a partial batch left over at end.\n # Note that images will still have the proper size.\n if idx > 0:\n yield filenames, images", "def getimg(filename):\n return np.asarray(Image.open('imgdb/'+filename))", "def get_existing_images(directory):\n validate_directory(directory)\n directory += '/'\n try:\n return listdir(directory)\n except:\n mkdir(directory)\n return []" ]
[ "0.66397595", "0.6540607", "0.64433455", "0.64104563", "0.6374026", "0.63023806", "0.6257343", "0.6191441", "0.61159426", "0.6109508", "0.6003954", "0.5990808", "0.59906566", "0.59873354", "0.59793514", "0.5957272", "0.59243476", "0.59169495", "0.58602643", "0.58406574", "0.5839491", "0.5834419", "0.5816725", "0.58120024", "0.5807871", "0.58027506", "0.5796723", "0.5791353", "0.5789187", "0.57874215", "0.57849723", "0.5766853", "0.57324165", "0.57122374", "0.57105243", "0.5702928", "0.569996", "0.56967443", "0.5679896", "0.5677852", "0.56593543", "0.5654666", "0.56508976", "0.56483763", "0.56419796", "0.5641461", "0.5634144", "0.5628164", "0.56213665", "0.56127745", "0.5607521", "0.5605685", "0.56001735", "0.55996335", "0.5589563", "0.55759376", "0.55738044", "0.55718833", "0.5568825", "0.5567564", "0.55625695", "0.55594146", "0.5557466", "0.55477506", "0.5547044", "0.55440634", "0.5542597", "0.55422497", "0.55415446", "0.5533806", "0.5530132", "0.5525682", "0.5521553", "0.5518936", "0.5517556", "0.55144334", "0.55121475", "0.55113953", "0.5510559", "0.550586", "0.5505551", "0.55053407", "0.55008996", "0.5484825", "0.5481317", "0.5473927", "0.54711145", "0.5469024", "0.54688346", "0.54686475", "0.5468097", "0.54680204", "0.5466523", "0.54639107", "0.54594904", "0.5448807", "0.5445595", "0.5444978", "0.5442631", "0.54359293" ]
0.62735325
6
stack pandas DataFrames logically into a bigger DataFrame, resets the index of the resulting DataFrame to avoid duplicates in the index
def _stack_dataframes(dataframes: List[pd.DataFrame]) -> pd.DataFrame: return pd.concat(dataframes).reset_index(drop=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reset_index(self):\n\n # reminder on multi index in columns\n df1 = pd.DataFrame([[1, 3], [2, 4], [11, 33], [22, 44]]).T\n df1.index = pd.Series([1, 2], name=\"idx1\")\n df1.columns = pd.MultiIndex.from_product([['a', 'b'], ['aa', 'bb']], names=['idx_c', 'idx2'])\n\n # same data frame in single command\n df2 = pd.DataFrame([[1, 2, 11, 22], [3, 4, 33, 44]],\n index=pd.Series([1, 2], name=\"idx1\"),\n columns=pd.MultiIndex.from_product([['a', 'b'], ['aa', 'bb']], names=['idx_c', 'idx2']))\n\n df2.loc[:, pd.IndexSlice[:, 'aa']] # getting all info using the second level of the column index out of it\n\n df2.T.reset_index().set_index(['idx_c', 'idx2']) # all together a nop\n self.assertTrue(df2.T.equals(df2.T.reset_index().set_index(['idx_c', 'idx2'])))\n df2.T.reset_index(0) # pull out first index level (idx_c)\n df2.T.reset_index(1) # pull out second index level (idx2)", "def _concat():\n df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']},\n index=[0, 1, 2, 3])\n\n df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],\n 'B': ['B4', 'B5', 'B6', 'B7'],\n 'C': ['C4', 'C5', 'C6', 'C7'],\n 'D': ['D4', 'D5', 'D6', 'D7']},\n index=[4, 5, 6, 7])\n\n df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],\n 'B': ['B8', 'B9', 'B10', 'B11'],\n 'C': ['C8', 'C9', 'C10', 'C11'],\n 'D': ['D8', 'D9', 'D10', 'D11']},\n index=[8, 9, 10, 11])\n frames = [df1, df2, df3]\n result = pd.concat(frames)\n print(result)\n result = pd.concat(frames, keys=['x', 'y', 'z'])\n print(result)\n print('-' * 20)\n df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'],\n 'D': ['D2', 'D3', 'D6', 'D7'],\n 'F': ['F2', 'F3', 'F6', 'F7']},\n index=[2, 3, 6, 7])\n result = pd.concat([df1, df4], axis=1)\n print(result)\n print('*' * 40)\n result = pd.concat([df1, df4], axis=1, join='inner') # 取交集\n print(result)\n result = pd.concat([df1, df4], axis=1, join_axes=[df1.index])\n print(result)", "def window_stack(df, width=3):\n n = df.shape[0]\n a = np.hstack(list(df.values[(width-1-i):(n-i)] for i in range(0, width)))\n\n times = [ ('t' if not idx else 't-{:d}'.format(idx)) for idx in range(width) ]\n columns = pd.MultiIndex.from_product((times, df.columns), names=('time', 'location'))\n\n return pd.DataFrame(a, index=df.index[width-1:], columns=columns)", "def _adjust_indices(left_df, right_df):\n index_diff = left_df.shape[0] - right_df.shape[0]\n if index_diff > 0:\n # right_df is shorter\n empty_df = pd.DataFrame(\n np.full((np.abs(index_diff), right_df.shape[1]), np.nan),\n columns=right_df.columns,\n )\n right_df = pd.concat((empty_df, right_df), axis=0).reset_index(drop=True)\n elif index_diff < 0:\n # left_df is shorter\n empty_df = pd.DataFrame(\n np.full((np.abs(index_diff), left_df.shape[1]), np.nan),\n columns=left_df.columns,\n )\n left_df = pd.concat((empty_df, left_df), axis=0).reset_index(drop=True)\n\n return left_df, right_df", "def reframe_df(previous_df, processed_data):\n idx = previous_df.index\n col = previous_df.columns\n df = pd.DataFrame(data=processed_data, index=idx, columns=col)\n return df", "def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n\r\n return res", "def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 
1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n return res", "def merge (*a_data) :\n i = 0\n for loc_data in a_data :\n i += 1\n if i == 1 :\n loc_new_df = loc_data\n else :\n loc_new_df = __pd.merge(loc_new_df,loc_data,left_index=True,right_index=True)\n return loc_new_df", "def dataframe_crossjoin(df1, df2, **kwargs):\n df1['_tmpkey'] = 1\n df2['_tmpkey'] = 1\n\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\n\n df1.drop('_tmpkey', axis=1, inplace=True)\n df2.drop('_tmpkey', axis=1, inplace=True)\n\n return res", "def drop_multindex(df):\n\n if isinstance(df.index, pd.MultiIndex):\n df_flat = df.reset_index()\n # keep index if False\n else:\n df_flat = df.copy()\n return df_flat", "def refresh_index(df):\n if isinstance(df.index, pandas.MultiIndex):\n return df.reset_index().set_index(df.index.names)\n else:\n return df", "def create_shifted_df(df: pd.DataFrame, periods: int = 1) -> pd.DataFrame:\n data_df_shifted = df.shift(periods=periods)\n data_df_shifted = data_df_shifted.combine_first(df).add_suffix(\"_shifted\")\n return pd.concat([df, data_df_shifted], axis=1, join=\"inner\").reset_index(\n drop=True\n )", "def reset_column_index(df: DataFrame, level: List[Any], drop: bool=True, inplace: bool=False):\n \n if inplace:\n if drop:\n df.columns = df.columns.droplevel(level)\n else:\n raise NotImplementedError\n return df\n else:\n if drop:\n result = df.copy()\n result.columns = df.columns.droplevel(level)\n else:\n result = df.stack(level)\n return result", "def reset_index(self):\n self.df = self.df.reset_index()", "def combined_df(self) -> pd.DataFrame:\n return pd.concat([self.data, self.latest_data.reset_index()], ignore_index=True)", "def stack_index(self, index, on_top=True, axis=1, inplace=False):\n\n def apply_func(obj_index):\n if on_top:\n return index_fns.stack_indexes(index, obj_index)\n return index_fns.stack_indexes(obj_index, index)\n\n return self.apply_on_index(apply_func, axis=axis, inplace=inplace)", "def stack(self, level, dropna):\n return DataFrameDefault.register(pandas.DataFrame.stack)(\n self, level=level, dropna=dropna\n )", "def mergeDataframes(datasets, cut):\n #subset = []tion\n subset = [dataset.iloc[:, index:] for dataset in datasets[1:]]\n \n first = subset[0].join(subset[1:], how = 'outer')\n finance = datasets[0].iloc[:, index:].join(first, how = 'left') \n # don't need to cut as only using relatively recent data for training\n #finance = finance[finance.index > cut]\n return finance", "def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame:\n return pd.concat(serieses, axis=\"columns\").T", "def merge_survey(self) -> pd.DataFrame:\n\n df_list = []\n for survey_id in self.survey_id:\n self.log.debug(f\"Reading: {survey_id}\")\n temp_df = self.get_survey_responses(survey_id)\n df_list.append(temp_df[2:])\n\n df_col = reduce(pd.Index.union, (df.columns for df in df_list))\n\n merged_df = pd.DataFrame()\n for df in df_list:\n temp_df = df.reindex(columns=df_col, fill_value=0)\n merged_df = merged_df.append([temp_df], ignore_index=True)\n return merged_df", "def reset_index(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.reset_index)(self, **kwargs)", "def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:", "def 
broadcast_merge(s,df):\n \n return pd.merge(pd.DataFrame(data=[s.values]*len(df),\n columns=s.index,\n index=df.index),\n df, left_index=True, right_index=True)", "def combine_position_dataframes(dataframe1, dataframe2):\n\n # check that the dataframes have the same number of columns\n print(\"Dimensions of dataframe1: \", dataframe1.shape)\n print(\"Dimensions of dataframe2: \", dataframe2.shape)\n\n frames = [dataframe1, dataframe2]\n\n combined_dataframe = pandas.concat(frames)\n\n dataframe1.drop(dataframe1.index, inplace=True) # Delete data from dataframe to save memory\n dataframe2.drop(dataframe2.index, inplace=True) # Delete data from dataframe to save memory\n\n # confirm that the dataframes no longer exist (saving memory)\n print(\"Dimensions of dataframe1: \", dataframe1.shape)\n print(\"Dimensions of dataframe2: \", dataframe2.shape)\n\n # check that all rows of both dataframes have been combined into the new dataframe. Sort by date and time.\n print(\"Dimensions of combined dataframe: \", combined_dataframe.shape)\n combined_dataframe_sorted = combined_dataframe.sort_values('date_time')\n\n print(\"Sample of combined dataframe: \", combined_dataframe_sorted.sample(10))\n\n return combined_dataframe_sorted", "def __merge_dataframes(dataframes: List[pd.DataFrame], empty_df: pd.DataFrame = pd.DataFrame(),\n sorted_column: Optional[str] = None) -> pd.DataFrame:\n for df in dataframes:\n empty_df = empty_df.append(df, ignore_index=True)\n empty_df.drop_duplicates(keep='first')\n if sorted_column is not None:\n empty_df.sort_values(by=[sorted_column])\n return empty_df", "def extend_dataset(intial_df):\n all_data = []\n for i,row in intial_df.iterrows():\n all_data.extend(create_all_combination(row))\n\n extended_results = pd.DataFrame(all_data)\n return extended_results", "def coerce( self ):\n df = self.copy()\n gcond = ['neighbor', 'pdb'] if 'source' not in df.columns else ['neighbor', 'pdb', 'source']\n for frame_id, frame in df.groupby('frame'):\n g = frame.groupby(gcond)\n neighbors = len(g)\n neighbor = list(g.ngroup() + 1)\n position = list(g.cumcount() + frame_id)\n df.loc[(df['frame'] == frame_id), 'neighbors'] = [neighbors] * frame.shape[0]\n df.loc[(df['frame'] == frame_id), 'neighbor'] = neighbor\n df.loc[(df['frame'] == frame_id), 'position'] = position\n return df", "def build_multi_index_data_frame(data_frames: list, sub_header: list, header_columns: list) -> pd.DataFrame:\r\n\r\n tuples = build_multi_index_tuples(header_columns, sub_header)\r\n\r\n multi_header = pd.MultiIndex.from_tuples(tuples)\r\n\r\n df = pd.concat(data_frames, axis=1).loc[:, dict(tuples).keys()]\r\n\r\n df.columns = multi_header\r\n\r\n return df", "def concat_without_duplicates(dfs):\n temp_dfs = []\n for temp_df in dfs:\n # Joining the different dfs resulted in a df with more rows. This is why\n # I do this. 
More info on https://stackoverflow.com/a/34297689/5031446\n # This removes rows with duplicated indexes and keeps just the last observation\n temp_df = temp_df[~temp_df.index.duplicated(keep='last')]\n temp_dfs.append(temp_df)\n result = pd.concat(temp_dfs, axis=1)\n\n return result", "def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0]}\n\n df = pd.DataFrame(d)\n print df\n df = pd.DataFrame(d, index=['a', 'b', 'c', 'd'])\n print df\n\n\n data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]\n df = pd.DataFrame(data2)\n\n print df", "def _dataarray_unstack(da, sources, targets, roi_tot, fill_value, order,\n rm_missing):\n import pandas as pd\n\n da['roi'] = pd.MultiIndex.from_arrays(\n [sources + targets, targets + sources], names=['sources', 'targets'])\n da = da.unstack(fill_value=fill_value)\n\n # transpose, reindex and reorder (if needed)\n da = da.transpose('sources', 'targets', 'times')\n da = da.reindex(dict(sources=roi_tot, targets=roi_tot),\n fill_value=fill_value)\n\n # change order\n if isinstance(order, (list, np.ndarray)):\n if rm_missing:\n order = [k for k in order.copy() if k in roi_tot.tolist()]\n da = da.reindex(dict(sources=order, targets=order))\n\n return da, order", "def merge_logs(dfs):\n return pd.concat(dfs, ignore_index=True)", "def merge_bg_pop(df):\n pop = blocks_population().to_frame('pop')\n pop['bg_id'] = pop.index.astype(str).str[0:12]\n pop = pop.groupby('bg_id')['pop'].sum().to_frame('pop')\n new_df = df.join(pop)\n assert new_df['pop'].notnull().min()\n return new_df", "def prepareDataframeForPivot(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n if self._isIndexedDataframe(df):\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.select_dtypes(include=['float64', 'int64'])\n if df.size == 0:\n df[\"values\"] = np.nan\n # try to keep group measures\n try:\n df.groupMeasures = result.groupMeasures\n except:\n pass\n # try to keep aggMeasures\n try:\n df.aggMeasures = result.aggMeasures\n except:\n pass\n\n return df", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def df_combine(array_df):\n import pandas as pd\n cols = []\n for i in range(len(a)):\n #print(i)\n if array_df[i].columns[0] in cols:\n array_df[i].columns = [array_df[i].columns[0] + '_' + str(i)]\n cols.append(array_df[i].columns[0])\n return pd.concat(a, axis=1, sort=True)", "def pandas_unstack(x):\n if not is_multiindex_dataframe(x):\n if is_dataframe(x):\n return x\n elif issubclass(type(x), pd.Series):\n return pd.DataFrame(x).T\n elif type(x) is list and all([is_dataframe(d) for d in x]):\n return x\n else:\n raise 
Exception(f'Unsupported datatype: {type(x)}')\n\n names = list(x.index.names)\n grouper = 'ID'\n if not (grouper in names):\n names[0] = grouper\n elif not (names[0] == grouper):\n for i in np.arange(\n len(names)): # trying n things other than 'ID'; one must be outside of the n-1 remaining names\n next_grouper = f'{grouper}{i}'\n if not (next_grouper in names):\n names[0] = next_grouper\n grouper = next_grouper\n break\n assert names[0] == grouper, 'Unstacking error'\n\n x.index.rename(names, inplace=True)\n\n groups = list(x.groupby(grouper))\n n_levels = len(groups[0][1].index.levels)\n if n_levels > 2:\n g = groups[0][1]\n index = pd.MultiIndex.from_arrays([g.index.get_level_values(len(g.index.levels) - n)\n for n in range(1, len(g.index.levels))][::-1])\n return [d[1].set_index(index) for d in groups]\n else:\n return [d[1].set_index(d[1].index.get_level_values(len(d[1].index.levels) - 1))\n for d in list(x.groupby(grouper))]", "def _dataframe_conversion(da, order):\n assert da.data.squeeze().ndim == 2, (\n \"Dataframe conversion only possible for connectivity arrays when \"\n \"time dimension is missing\")\n da = da.squeeze().to_dataframe('mi').reset_index()\n da = da.pivot('sources', 'targets', 'mi')\n if isinstance(order, (list, np.ndarray)):\n da = da.reindex(order, axis='index').reindex(order, axis='columns')\n\n return da", "def combine_dfs(dfs, column_name='population'):\n cdfs = []\n for name, path in dfs.items():\n print('Working on {}'.format(path))\n df = pd.read_pickle(path)\n df[column_name] = name\n df['snp'] = df.index.to_series()\n df['idx'] = df[column_name].astype(str) + '.' + df['snp'].astype(str)\n cdfs.append(df)\n print('Combining')\n return pd.concat(cdfs)", "def regular_index(*dfs):\n original_index = [df.index for df in dfs]\n have_bad_index = [not isinstance(df.index, pd.RangeIndex)\n for df in dfs]\n\n for df, bad in zip(dfs, have_bad_index):\n if bad:\n df.reset_index(drop=True, inplace=True)\n\n try:\n yield dfs\n finally:\n for df, bad, idx in zip(dfs, have_bad_index, original_index):\n if bad and len(df.index) == len(idx):\n df.index = idx", "def process_and_merge(s):\n l = [preprocessing(df) for df in s]\n d = {x.name: x for x in l}\n df = pd.DataFrame(d)\n df.index.names = [x.lower() for x in df.index.names]\n return pd.DataFrame(d)", "def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n from pyspark.pandas import DataFrame\n\n groupby = self._groupby\n psdf = groupby._psdf\n\n # Here we need to include grouped key as an index, and shift previous index.\n # [index_column0, index_column1] -> [grouped key, index_column0, index_column1]\n new_index_scols: List[Column] = []\n new_index_spark_column_names = []\n new_index_names = []\n new_index_fields = []\n for groupkey in groupby._groupkeys:\n index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))\n new_index_scols.append(groupkey.spark.column.alias(index_column_name))\n new_index_spark_column_names.append(index_column_name)\n new_index_names.append(groupkey._column_label)\n new_index_fields.append(groupkey._internal.data_fields[0].copy(name=index_column_name))\n\n for new_index_scol, index_name, index_field in zip(\n psdf._internal.index_spark_columns,\n psdf._internal.index_names,\n psdf._internal.index_fields,\n ):\n index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))\n new_index_scols.append(new_index_scol.alias(index_column_name))\n new_index_spark_column_names.append(index_column_name)\n new_index_names.append(index_name)\n 
new_index_fields.append(index_field.copy(name=index_column_name))\n\n if groupby._agg_columns_selected:\n agg_columns = groupby._agg_columns\n else:\n # pandas doesn't keep the groupkey as a column from 1.3 for DataFrameGroupBy\n column_labels_to_exclude = groupby._column_labels_to_exclude.copy()\n if isinstance(groupby, DataFrameGroupBy):\n for groupkey in groupby._groupkeys: # type: ignore[attr-defined]\n column_labels_to_exclude.add(groupkey._internal.column_labels[0])\n agg_columns = [\n psdf._psser_for(label)\n for label in psdf._internal.column_labels\n if label not in column_labels_to_exclude\n ]\n\n applied = []\n for agg_column in agg_columns:\n applied.append(agg_column._with_new_scol(func(agg_column.spark.column))) # TODO: dtype?\n\n # Seems like pandas filters out when grouped key is NA.\n cond = groupby._groupkeys[0].spark.column.isNotNull()\n for c in groupby._groupkeys[1:]:\n cond = cond | c.spark.column.isNotNull()\n\n sdf = psdf._internal.spark_frame.filter(cond).select(\n new_index_scols + [c.spark.column for c in applied]\n )\n\n internal = psdf._internal.copy(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in new_index_spark_column_names],\n index_names=new_index_names,\n index_fields=new_index_fields,\n column_labels=[c._column_label for c in applied],\n data_spark_columns=[\n scol_for(sdf, c._internal.data_spark_column_names[0]) for c in applied\n ],\n data_fields=[c._internal.data_fields[0] for c in applied],\n )\n\n return groupby._handle_output(DataFrame(internal))", "def rel_matrix(df_long: pd.DataFrame) -> None:\n pass", "def union_all(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n return bind_rows(x, y, __calling_env=CallingEnvs.REGULAR)", "def mapback(df):\n df.set_index(df['Datetime'], drop=False, inplace=True) #keep for later use\n df.sort_index(inplace=True)", "def split_set(dataframe, test_size):\n i = np.floor(len(dataframe)*test_size).astype(int)\n set_a = dataframe[0:i].reset_index()\n set_b = dataframe[i:].reset_index()\n return set_a, set_b", "def index_reformat(series: pd.Series, preserve_order: bool) -> pd.DataFrame:\n series = series.copy()\n series = rewrite_index(series)\n series.index = remove_constant_levels(series.index)\n series.index.names = [LEVEL_NAMES.get(name, name) for name in series.index.names]\n series = series.rename(index=pretty_rewrite)\n\n # Preserve order of inputs\n df = series.unstack(\"Target\")\n if preserve_order:\n df = df.reindex(columns=series.index.get_level_values(\"Target\").unique())\n for level in series.index.names:\n kwargs = {}\n if isinstance(df.index, pd.MultiIndex):\n kwargs = dict(level=level)\n if level != \"Target\":\n df = df.reindex(index=series.index.get_level_values(level).unique(), **kwargs)\n else:\n df = df.sort_index()\n return df", "def horizontal_concat():\n df_1 = pd.DataFrame(get_mixed_matrix())\n df_2 = pd.DataFrame(get_mixed_matrix())\n df_2.drop([9], inplace=True)\n print(\"df_1:\")\n print(df_1)\n print(\"df_2:\")\n print(df_2)\n # axis = 1 performs column concatenation (horizontal concatenation)\n concat = pd.concat([df_1, df_2], axis=1)\n print(\"concat:\")\n print(concat.to_string())", "def stack(*datasets):\n return Dataset(\n data=vstack([d.data for d in datasets]),\n target=stack_or_concat([d.target for d in datasets]),\n sample_info=np.concatenate([d.sample_info for d in datasets]),\n info={\n k: [d.info.get(k, None) for d in datasets]\n for k in merge_dicts(*[d.info for d in datasets])\n },\n )", "def merge_dfs(userdf, filtered_apidf):\n 
userdf['SOURCE']='USER'\n filtered_apidf['SOURCE']='API'\n filtered_apidf.rename(columns={'_id': 'bids_name'}, inplace=True)\n\n merged_df = pd.concat([userdf,filtered_apidf], sort=True).fillna(0)\n # merged_df['_INDEX']=merged_df.index\n\n # merged_df_with_index = pd.DataFrame(index = merged_df.index, data= merged_df)\n return merged_df", "def pad_df(df):\n # 1. compute the sizes of each sample_nr\n sr_sizes = df.groupby(df.index.get_level_values(0)).size()\n # Get the sample label\n labels = df.groupby(df.index.get_level_values(0))['label'].mean()\n # compute max size and #sample_nr\n max_size = sr_sizes.max()\n n_sample_nrs = len(sr_sizes)\n\n # 2. preallocate the output array and fill\n arr = np.zeros((max_size * n_sample_nrs, len(df.columns)))\n idx_lv0 = df.index.get_level_values(0) # get sample_nr\n for i in tqdm(range(n_sample_nrs), desc='Padding data'):\n row = i * max_size\n arr[row:row + sr_sizes.iloc[i], :] = df[idx_lv0 == sr_sizes.index[i]].values\n arr[row:row + max_size, -1] = labels[i + 1]\n\n # 3. convert to dataframe\n df_ans = pd.DataFrame(\n data=arr,\n index=pd.MultiIndex.from_product([sr_sizes.index, range(max_size)]),\n columns=df.columns\n ).rename_axis(df.index.names, axis=0)\n\n return df_ans", "def segmented_pivot_table(data_stack, index, columns, values, rows=10000000):\r\n size = data_stack.shape[0]\r\n pivot_matrix = []\r\n for i in range(0, size, rows):\r\n partial_pivot = data_stack[i: min(i+rows, size)].pivot_table(\r\n index=index,\r\n columns=columns,\r\n values=values,\r\n fill_value=0,\r\n aggfunc=np.sum,\r\n )\r\n pivot_matrix.append(partial_pivot)\r\n pivot_matrix = pd.concat(pivot_matrix).fillna(0)\r\n # Because the default axis to concatenate is the 0, some duplicate\r\n # index appear with null values. With this groupby, the duplicate axis\r\n # disappear, keeping the original values.\r\n pivot_matrix = pivot_matrix.groupby(pivot_matrix.index).sum()\r\n if len(index) >= 2:\r\n # Groupby transforms multiindex structure into a tuple. 
This line\r\n # reverse the transformation.\r\n pivot_matrix.index = pd.MultiIndex.from_tuples(\r\n pivot_matrix.index,\r\n names=index,\r\n )\r\n return pivot_matrix", "def _wrap_in_pandas_container(\n data_to_wrap,\n *,\n columns,\n index=None,\n):\n if issparse(data_to_wrap):\n raise ValueError(\"Pandas output does not support sparse data.\")\n\n if callable(columns):\n try:\n columns = columns()\n except Exception:\n columns = None\n\n pd = check_pandas_support(\"Setting output container to 'pandas'\")\n\n if isinstance(data_to_wrap, pd.DataFrame):\n if columns is not None:\n data_to_wrap.columns = columns\n return data_to_wrap\n\n return pd.DataFrame(data_to_wrap, index=index, columns=columns, copy=False)", "def transform(self, df):\n df = df.copy()\n \"\"\"\n if self.grouping is not None:\n df = self.hier.transform(df)\n \"\"\"\n # fill NaN\n df = self.fill_na(df)\n\n self.df_index = df.index\n self.df_colnames = df.columns\n # transformations\n for i in sorted(self.transformations.keys()):\n transformation = self.transformations[i]\n df = self.transformers[i].transform(df)\n # convert to DataFrame only if it isn't already\n if not isinstance(df, pd.DataFrame):\n df = pd.DataFrame(df)\n df.index = self.df_index\n df.columns = self.df_colnames\n # update index reference if sliced\n if transformation in ['Slice']:\n self.df_index = df.index\n self.df_colnames = df.columns\n df = df.replace([np.inf, -np.inf], 0) # .fillna(0)\n return df", "def make_dataframes(folders, file_stem):\n\n print \"Making one big dataframe...\"\n df_orig = load_df(folders, file_stem, n_files=500)\n # df_orig = load_df(folders, \"output\")\n # df_orig = load_df(folders, \"output_ma1Lt11\")\n # df_orig = load_df(folders, \"output_good\")\n\n print len(df_orig.index), 'entries in dataframe'\n\n # Drop columns to save space\n drop_cols = [\n 'h1u', 'h1d', 'h1b', 'h1V', 'h1G', 'h1A',\n 'h2u', 'h2d', 'h2b', 'h2V', 'h2G', 'h2A',\n 'Brh3gg', 'Brh3tautau', 'Brh3bb', 'Brh3ww',\n 'Brh3zz', 'Brh3gammagamma', 'Brh3zgamma',\n 'Brh3h1h1', 'Brh3h2h2', 'Brh3h1h2',\n 'Brh3a1a1', 'Brh3a1z',\n # 'bsgamma', 'bsmumu', 'btaunu', 'delms', 'delmd']\n ]\n\n for col in drop_cols:\n if col in df_orig.columns.values:\n df_orig.drop(col, inplace=True, axis=1)\n print \"After dropping columns:\", df_orig.columns.values, len(df_orig.columns.values), \"columns\"\n\n # Remove any duplicate entries\n df_orig.drop_duplicates(inplace=True)\n\n # Load up the glu-glu cross sections for 13 TeV\n print \"Adding in cross-sections...\"\n # cs = pd.read_csv(\"parton_lumi_ratio.csv\")\n cs = pd.read_csv(\"YR3_cross_sections.csv\")\n masses = cs[\"MH [GeV]\"]\n mass_len = len(masses)\n xsec_ggf13 = cs[\"ggF 13TeV Cross Section [pb]\"]\n xsec_vbf13 = cs[\"VBF 13TeV Cross Section [pb]\"]\n # xsec_wh13 = cs[\"WH 13TeV Cross Section [pb]\"]\n # xsec_zh13 = cs[\"ZH 13TeV Cross Section [pb]\"]\n xsec_ggf8 = cs[\"ggF 8TeV Cross Section [pb]\"]\n xsec_vbf8 = cs[\"VBF 8TeV Cross Section [pb]\"]\n\n def find_closest_mass_ind(mass):\n pos = bisect_left(masses, mass)\n if pos == mass_len:\n return mass_len - 1\n return pos\n\n print 'Storing nearest-mass indices'\n df_orig['mass_ind_h1'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh1']), axis=1)\n df_orig['mass_ind_h2'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh2']), axis=1)\n df_orig['mass_ind_h3'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh3']), axis=1)\n\n # ALL XSEC STORED ARE CORRECTLY SCALED BY REDUCED COUPLING\n print \"Storing 13 TeV gg xsec\"\n 
df_orig[\"xsec_ggf13_h1\"] = df_orig['h1ggrc2'] * xsec_ggf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf13_h2\"] = df_orig['h2ggrc2'] * xsec_ggf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf13_h3\"] = df_orig['h3ggrc2'] * xsec_ggf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 13 TeV vbf xsec\"\n df_orig[\"xsec_vbf13_h1\"] = df_orig['h1vvrc2'] * xsec_vbf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf13_h2\"] = df_orig['h2vvrc2'] * xsec_vbf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf13_h3\"] = df_orig['h3vvrc2'] * xsec_vbf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV ggf xsec\"\n df_orig[\"xsec_ggf8_h1\"] = df_orig['h1ggrc2'] * xsec_ggf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf8_h2\"] = df_orig['h2ggrc2'] * xsec_ggf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf8_h3\"] = df_orig['h3ggrc2'] * xsec_ggf8[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV vbf xsec\"\n df_orig[\"xsec_vbf8_h1\"] = df_orig['h1vvrc2'] * xsec_vbf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf8_h2\"] = df_orig['h2vvrc2'] * xsec_vbf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf8_h3\"] = df_orig['h3vvrc2'] * xsec_vbf8[df_orig['mass_ind_h3']].values\n\n # Now add in individual channel xsec\n store_channel_xsec(df_orig)\n print df_orig.columns.values\n\n # Make some subsets here:\n print \"Making subsets...\"\n\n # Points passing all experimental constraints chosen\n df_pass_all = subset_pass_constraints(df_orig)\n # df_pass_all = None\n\n # subset with 2m_tau < ma1 < 10\n df_ma1Lt10 = None\n # df_ma1Lt10 = subset_var(df_pass_all, 3.554, 10.5, \"ma1\")\n\n mhmin, mhmax = 122.1, 128.1\n # subset with h1 as h_125\n # df_h1SM = subset_var(df_pass_all, mhmin, mhmax, \"mh1\")\n df_h1SM = None\n\n # subset with h2 as h_125\n # df_h2SM = subset_var(df_pass_all, mhmin, mhmax, \"mh2\")\n df_h2SM = None\n\n n_orig = len(df_orig.index)\n\n def percent_str(numerator, denominator):\n return \"%.3f %% \" % (100*numerator/float(denominator))\n\n print \"Running over\", n_orig, \"points\"\n if isinstance(df_pass_all, pd.DataFrame):\n n_pass_all = len(df_pass_all.index)\n print n_pass_all, \"points passing all constraints (= %s)\" % percent_str(n_pass_all, n_orig)\n # print len(df_ma1Lt10.index), \"of these have 2m_tau < ma1 < 10 GeV (= %s)\" % percent_str(len(df_ma1Lt10.index), n_pass_all)\n # print len(df_h1SM.index), \"points in the h1 = h(125) subset (= %s)\" % percent_str(len(df_h1SM.index), n_pass_all)\n # print len(df_h2SM.index), \"points in the h2 = h(125) subset (= %s)\" % percent_str(len(df_h2SM.index), n_pass_all)\n print \"\"\n\n return df_orig, df_pass_all, df_ma1Lt10, df_h1SM, df_h2SM", "def prepare_and_save_data(df, index=None, output_path=None):\n assert index is not None\n assert output_path is not None\n\n df.columns.name = 'fundamental_variable'\n df = df.set_index(['Quarter end', 'symbol'])\n\n df = df.stack()\n df = df.unstack(level=1)\n\n # In the initial csv files, None is used to represent missing data\n df[df == 'None'] = np.nan\n\n df.columns.name = None\n\n #df.reset_index().to_csv(path.join(data_dir, 'fundamental_data_of_stocks_stacked.csv'), index=False)\n\n # split the large dataframe based on the fundamental_variables\n # now each DataFrame represent one fundamental variable about a bunch of stocks.\n df = df.reset_index()\n\n\n # convert Quarter end to datetime object\n df['Quarter end'] = pd.to_datetime(df['Quarter end'])\n df = df.sort(['Quarter end'], axis=0)\n # convert the strings to float\n df = 
df.set_index(['Quarter end', 'fundamental_variable']).astype(float).reset_index(level=1)\n\n\n def _reindex(df):\n # This function will be applied to each fundamental variable group.\n # It will reindex the DataFrame. It will add the target index to the existing index and sort the new index\n new_index = index.append(df.index).drop_duplicates().values\n new_index.sort()\n\n # fill NaN with forward fill method. We should use forward filling because we can only use historical data\n # not the data from the future.\n df = df.reindex(new_index).fillna(method='ffill')\n\n return df\n\n\n def _to_csv(df):\n var_name = df.iloc[0]['fundamental_variable']\n print(\"{:<30} {}\".format(\"preparing fundamental variable\", var_name))\n var_name = re.sub('[-\\s\\&\\/]', '_', var_name) # remove special characters\n var_name = re.sub('_+', '_', var_name) # remove repetitive underscores\n var_name = re.sub('_$', '', var_name) # remove the tailing underscore\n df.set_index(['Quarter end']).reindex(index).to_csv(path.join(output_path, var_name + '.csv'), index=True)\n\n\n\n # The following step is to reindex the DataFrame. Note that the reindex is done by each fundamental variable.\n # The reason for this is if we do the reindex without groupby, there will be NaN in the fundamental_variable after\n # reindex operation, which is problematic for the forward fill operation of the next step\n df_reindex = df.groupby(by=['fundamental_variable'], as_index=False).apply(_reindex).sort_index()\n df_reindex.index = df_reindex.index.droplevel(0)\n\n # save to csv file by each fundamental variable\n df_out = df_reindex.reset_index()\n df_out.groupby(by=['fundamental_variable']).apply(_to_csv)", "def compare(old_dataframe, fresh_dataframe):\n combined_dataframe = pd.concat([old_dataframe, fresh_dataframe])\n combined_dataframe = combined_dataframe.reset_index(drop=True)\n\n grouped_dataframes = combined_dataframe.groupby(DataFrameRow.REQUIRED)\n\n # if there is overlap, there will be a column with length > 1\n unique_indices = [col[0] for col in grouped_dataframes.groups.values() if\n len(col) == 1]\n\n return combined_dataframe.reindex(unique_indices)", "def consolidate_results(df, offsets):\n for i in range(len(offsets) - 1):\n df_tmp = df[offsets[i] : offsets[i + 1]]\n df_tmp[\"labels\"] = i\n if i == 0:\n df_consolidate = df_tmp\n else:\n df_consolidate = cudf.concat([df_consolidate, df_tmp])\n return df_consolidate", "def refresh(self, do_transforms=True) -> pd.DataFrame:\n log.info(f\"Refreshing dataset {self.name}\")\n # SKeleton df should have many indices to join in all the children\n df = self.table_skeleton.df\n log.debug(f\"Dataset {self.name} has index cols {df.index.names}.\")\n\n # if we are subsetting by columns do this per source\n if self.cols:\n for table in self.tables:\n cols_this_table = [\n col for col in self.cols if col in table.cols\n ]\n # if any cols to get\n if cols_this_table:\n # Join them in\n len_pre = len(df)\n log.debug(\n f\"Joining in {cols_this_table} from table {table.name}\"\n )\n df = df.join(table.get_df_cols(cols_this_table))\n if not len_pre == len(df):\n raise RuntimeError(\"Joining changed the row count!\")\n # Else just get all cols\n else:\n for table in self.tables:\n log.debug(f\"Joining in all of {table.name}\")\n len_pre = len(df)\n df = df.join(table.df)\n if not len_pre == len(df):\n raise RuntimeError(\"Joining changed the row count!\")\n\n # After joining in all the tables set the index to regular panel\n log.debug(f\"Reset index {self.name}\")\n 
df.reset_index(inplace=True)\n log.debug(f\"Setting index {self.ids} on {self.name}.\")\n df.set_index(self.ids, inplace=True)\n log.debug(f\"Sorting cols on {self.name}\")\n df.sort_index(axis=1, inplace=True)\n log.debug(f\"Sorting rows on {self.name}\")\n df.sort_index(axis=0, inplace=True)\n\n # Balance the index if we want\n if self.balance:\n df = datautils.balance_panel_last_t(df)\n # if we extend the index to be balanced we just backfill\n df = missing.extrapolate(df)\n\n if self.transforms and do_transforms:\n # Transforms might need geometry, so do them on a geodataframe\n log.debug(f\"Joining in gdf to {self.name}\")\n df = self.just_geometry_gdf.join(df)\n log.debug(\n f\"Computing {len(self.transforms)} transforms \"\n f\"for dataset {self.name}\"\n )\n for transform in self.transforms:\n df[transform.name] = np.nan\n df[transform.name] = transform.compute(df)\n # Geodataframes with geometry can't be persisted in parquet\n # So we drop the geometry\n df = df.drop(columns=[\"geom\"])\n\n # Sort our data again as transforms aren't ordered.\n df = df.sort_index(axis=0).sort_index(axis=1)\n\n # Store the data\n io.df_to_parquet(df, path=self.path)\n return df", "def set_frame_index(self):\n if 'frame' in self.df.columns.values.tolist():\n if self.df.index.name == 'frame':\n self.df = self.df.drop('frame', 1)\n else:\n self.df = self.df.set_index('frame')", "def stack_table(A: pd.DataFrame) -> pd.DataFrame:\r\n A = pd.DataFrame(A.stack(dropna=False))\r\n A.columns = ['factor']\r\n return A", "def create_Xy_df(X_df, y_df, on_cols):\n return pd.merge(X_df, y_df, how='inner', on=on_cols)", "def shift_df(df, shift, shift_names):\n\n other_names = [name for name in df.columns if name not in shift_names]\n\n df1 = df.loc[:, shift_names].drop(df.head(shift).index)\n df2 = df.loc[:, other_names].drop(df.tail(shift).index)\n df2.index += shift # need to match index, otherwise concat will ignore offset\n new_df = pd.concat((df1, df2), axis=1, ignore_index=True, join='inner')\n new_df.columns = shift_names + other_names\n del df1, df2\n df_shifted = new_df\n del new_df\n\n # Reset index\n df_shifted.reset_index(inplace=True)\n df_shifted = df_shifted.drop(['index'], axis=1)\n\n return df_shifted", "def unstack(self, level, fill_value):\n return DataFrameDefault.register(pandas.DataFrame.unstack)(\n self, level=level, fill_value=fill_value\n )", "def rearrange(df: pd.DataFrame) -> pd.DataFrame:\r\n df = df.pivot(index='Instrument', columns='Date', values='Close Price').transpose()\r\n df.index = pd.to_datetime(df.index)\r\n return df", "def stack_merger(df):\n\n # pick out all columns that may contain diagnoses\n diagnosis_vars = ['dx_1', 'dx_2', 'dx_3','dx_4','dx_5','dx_6']\n # select vars with just diagnosis info\n diag_df = df[diagnosis_vars]\n #stack diagnosis var subset using index of original df\n stacked_df = diag_df.set_index([diag_df.index]).stack().reset_index()\n # rename stacked vars\n stacked_df = stacked_df.rename(columns = {\"level_0\" : \"patient_index\", 'level_1' : 'diagnosis_id', 0 : 'cause_code'})\n # replace diagnosis_id with codes, 1 for primary, 2 for secondary and beyond\n stacked_df['diagnosis_id'] = np.where(stacked_df['diagnosis_id'] == 'dx_1', 1, 2)\n # merge stacked_df with the original df\n merged_df = stacked_df.merge(df, left_on = 'patient_index', right_index = True, how = 'outer')\n\n # verify that no data was lost\n # Check if primary diagnosis value counts are the same\n assert (diag_df[diagnosis_vars[0]].value_counts()\n == 
merged_df['cause_code'].loc[merged_df['diagnosis_id'] == 1].value_counts()).all(),\\\n \"Primary Diagnoses counts are not the same before and after the Stack-Merge\"\n # check if all primary diagnosis are present before and after\n assert (diag_df[diagnosis_vars[0]].sort_values().values\n == merged_df[merged_df['diagnosis_id'] == 1]['cause_code'].dropna().sort_values().values).all(),\\\n \"Not all Primary Diagnoses are present before and after the Stack-Merge\"\n\n # check if counts of all secondary diagnoses are the same before and after\n old_second_total = diag_df[diagnosis_vars[1:]].apply(pd.Series.value_counts).sum(axis = 1)\n new_second_total = merged_df['cause_code'].loc[merged_df['diagnosis_id'] == 2].value_counts()\n assert (old_second_total.sort_index().values == new_second_total.sort_index().values).all(),\\\n \"The counts of Secondary Diagnoses were not the same before and after the Stack-Merge\"\n #check if all secondary diagnoses are present before and after\n assert (old_second_total.sort_index().index == new_second_total.sort_index().index).all(),\\\n \"Not all Secondary Diagnoses are present before and after the Stack-Merge\"\n\n # drop all the diagnosis features, we don't need them anymore\n merged_df.drop(diagnosis_vars, axis = 1, inplace = True)\n print(\"All tests passed\")\n return merged_df", "def recompute_frame(df):\n\t\n\t# recomputing\n\tdf_u = df.groupby('user_id')\n\tdf_b = df.groupby('business_id')\n\tavg_u, review_count_u = df_u['stars'].mean(), df_u['review_id'].count()\n\tavg_b, review_count_b = df_b['stars'].mean(), df_b['review_id'].count()\n\n\t\"\"\"\n\t# using the merge way \n\t\n\t# remove the original column \n\tdf.drop( [ 'user_avg', 'user_review_count' ], axis = 1, inplace = True )\n\tdf.drop( [ 'business_avg', 'business_review_count' ], axis = 1, inplace = True )\n\n\tuser = pd.concat( [ avg_u, review_count_u ], axis = 1 )\n\tuser.columns = [ 'user_avg', 'user_review_count' ]\n\tbusiness = pd.concat( [ avg_b, review_count_b ], axis = 1 )\n\tbusiness.columns = [ 'business_avg', 'business_review_count' ]\n\n\tdf = df.merge( user, left_on = 'user_id', right_index = True )\n\tdf = df.merge( business, left_on = 'business_id', right_index = True )\n\t\"\"\"\n\t\n\tdf = df.copy() # revent copy warning \n\n\t# assign the value back according to the index\n\tdf.set_index( ['business_id'], inplace = True )\n\tdf['business_avg'] = avg_b\n\tdf['business_review_count'] = review_count_b\n\tdf.reset_index( inplace = True )\n\tdf.set_index( ['user_id'], inplace = True )\n\tdf['user_avg'] = avg_u\n\tdf['user_review_count'] = review_count_u\n\tdf.reset_index( inplace = True )\n\treturn df", "def joiner(spine_df: pd.DataFrame, *dfs: pd.DataFrame) -> pd.DataFrame:\n id_columns = _get_id_columns(data=spine_df)\n\n merged_dfs = reduce(\n lambda df, df2: df.merge(df2, on=id_columns, how=\"left\"), dfs, spine_df\n )\n # Confirm that the number of rows is unchanged after the operation has completed\n assert spine_df.shape[0] == merged_dfs.shape[0]\n return merged_dfs", "def iat_df(self, df):\n result = self.iat(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def at_df(self, df):\n result = self.at(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = 
copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict", "def compute_df_reindexed(dfs, dfname, dfname_ri=None):\n \n if dfname_ri is None:\n dfname_ri = dfname + \"_reindexed\"\n \n \"\"\" Prepare data frame, transpose, drop calendar index\"\"\"\n df = dfs[dfname].copy()\n df = df.T\n df = df.reset_index(drop=True)\n\n \"\"\" Prepare indexing data frame (from active cases), transpose, drop calendar index\"\"\"\n idf = dfs[\"active_cases\"].copy()\n idf = idf.T\n idf = idf.reset_index(drop=True)\n\n \"\"\" Add two time periods (Hubei is otherwise too long or starts too late) \"\"\"\n for i in range(2):\n df.append(pd.Series(), ignore_index=True)\n idf.append(pd.Series(), ignore_index=True)\n \n len_data = len(df)\n \n \"\"\" Go through countries, shift start of the epidemic to the beginning of the data frame\"\"\"\n dfcols = df.columns\n for ccol in dfcols:\n idx = np.argmax(np.asarray(idf[ccol])>=100)\n if idx==0 and idf[ccol][0] < 100:\n idx = len_data\n \"\"\"Denmark and South Korea have big jumps at ~ 100 cases\"\"\"\n if ccol in [\"Denmark\", \"Korea, South\"]:\n idx -= 1\n \"\"\"Hubei starts two time periods after start of the epidemic, most other start too early\"\"\"\n if ccol != \"Hubei\":\n replacement_0 = np.asarray(df[ccol][idx:])\n replacement_1 = np.empty(idx)\n replacement_1[:] = np.nan\n else:\n replacement_1 = np.asarray(df[ccol][:-2])\n replacement_0 = np.empty(2)\n replacement_0[:] = np.nan\n \n replacement = np.hstack((replacement_0, replacement_1))\n df[ccol] = pd.Series(replacement)\n \n \"\"\" Transpose back, return\"\"\"\n dfs[dfname_ri] = df.T\n return(dfs)", "def filter_input(input_df, target_df):\n # input_df = input_df.reindex(target_df.index, copy=False)\n data_df = pd.concat((input_df, target_df), join=\"inner\", copy=False, axis=1)\n return data_df", "def store_predictions(df):\n ts = df[df.columns[1]]\n base = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted '+ df.columns[1])\n base.index = df['year']\n base = base.append(pd.DataFrame(preds), sort = True)\n for col in df.columns[2:]:\n ts = df[col]\n temp = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted ' + col)\n temp.index = df['year']\n temp = temp.append(pd.DataFrame(preds), sort = True)\n base = base.join(temp)\n return base", "def __append_columns(self, new_dataframe):\n self.dataframe = pd.merge(self.dataframe, new_dataframe)", "def reindexed_dataframe(self):\n tmp = self.metric.copy()\n tmp.index = tmp.index.map(self.match_regions())\n #give the same index order as the geojson\n out = tmp.reindex(index = self.regions_names())\n return out\n #index_name = out.index.name\n\n #return out.reset_index().dropna().set_index(index_name)[self.metric.name]", "def merge_df_rows(dlist):\n\n # Create Dataframe from the dlist files\n dframe = concat(dlist, axis=0, join='outer', 
sort=False)\n\n # Sort the df based on the datetime index\n dframe.sort_values(by='Dates', inplace=True)\n\n # Setting Dates as the dataframe index\n dframe.set_index(['Dates'], drop=True, inplace=True)\n\n # Dropiing duplicated time points that may exist in the data\n dframe = dframe[~dframe.index.duplicated()]\n\n return dframe", "def related_df_shaper(df): \n id_related=list()\n id_primary=list()\n id_relation_type=list()\n for id_term in df.id_term:\n \n related_id_list=df.loc[df.id_term==id_term,'related_terms'].values[0]\n id_relation_type_list=df.loc[df.id_term==id_term,'id_relation_type'].values[0]\n for i in range(len(related_id_list)):\n id_related.append(related_id_list[i])\n id_relation_type.append(id_relation_type_list[i])\n id_primary.append(id_term)\n \n df_rs=pd.DataFrame({'id_term':id_primary,'id_term_related':id_related,'id_relation_type':id_relation_type})\n now=pd.to_datetime(datetime.datetime.now())\n df_rs=df_rs.assign(datetime_created=now)\n df_rs=df_rs.assign(datetime_updated=now)\n df_rs=df_rs.assign(id_user_created=7)\n df_rs=df_rs.assign(id_user_updated=7)\n \n return df_rs", "def warping_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.warping_records(run_idxs))", "def spMultiIndex(self):\n # reset column levels\n self.spfiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])\n self.spRMS.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMS'), len(self.channels))],names=['Channel','datatype'])\n self.spRMSmavg.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMSmavg'), len(self.channels))],names=['Channel','datatype'])\n\n # list df vars for index specs\n dfs =[self.spfiltEEG, self.spRMS, self.spRMSmavg] # for > speed, don't store spinfilt_RMS as an attribute\n calcs = ['Filtered', 'RMS', 'RMSmavg']\n lvl0 = np.repeat(self.channels, len(calcs))\n lvl1 = calcs*len(self.channels) \n \n # combine & custom sort\n self.spindle_calcs = pd.concat(dfs, axis=1).reindex(columns=[lvl0, lvl1])", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n tile_len = len(self.tile_values_lag_1.index)\n df_len = df.shape[0]\n sdf = pd.DataFrame(\n np.tile(self.tile_values_lag_1, (int(np.ceil(df_len / tile_len)), 1))\n )\n if trans_method == 'original':\n sdf = sdf.tail(df_len)\n else:\n sdf = sdf.head(df_len)\n sdf.index = df.index\n sdf.columns = df.columns\n return df + sdf", "def getNewDF_X(self, originalDF):\n new_temps = [x for x in range(-10, 10, 1)]\n for unit in range(-10, 10, 1):\n new_temps[unit] = originalDF[['R1', 'G1', 'B1', 'R2', 'G2', 'B2', 'R3', 'G3', 'B3']].iloc[:] + unit\n new_temps[unit]['W1'] = originalDF['W1']\n new_temps[unit]['W2'] = originalDF['W2']\n new_temps[unit]['W3'] = originalDF['W3']\n returnVal = pd.concat(new_temps)\n return returnVal", "def _merge(dts):\n df = pd.concat(dts)\n\n ma = df.pivot(index='isomir', columns='sample', values='counts')\n ma_mirna = ma\n ma = ma.fillna(0)\n ma_mirna['mirna'] = [m.split(\":\")[0] for m in ma.index.values]\n ma_mirna = ma_mirna.groupby(['mirna']).sum()\n ma_mirna = ma_mirna.fillna(0)\n return ma, ma_mirna", "def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool = False):\n if reset_index:\n dframe = dframe.reset_index(drop=True)\n return self.__class__(dframe, self.meta.copy())", "def _join_multilevel_dataframes(df_list):\n minx_df = []\n for df in df_list:\n if isinstance(df.columns, pd.MultiIndex):\n minx_df.append(df)\n else:\n df.columns = 
pd.MultiIndex.from_product([df.columns, ['']])\n minx_df.append(df)\n # Join all dataframes together\n multi_concat = pd.concat(minx_df, axis=1)\n return multi_concat", "def reindex_hfd5(self):\n dfs = []\n objectpath = os.path.join(self.rootpath, self.OBJECTPATH)\n for root, dirs, files in os.walk(objectpath, topdown=False):\n for name in files:\n blob_uuid = name\n dfs.append(self.load_blob_metadata_value_df(blob_uuid))\n df = pd.concat(dfs)\n self.index.df = df\n self.index.to_hdf5(os.path.join(self.rootpath, self.INDEXFILENAME))\n return df", "def merge_blocks_pop(df):\n pop = blocks_population()\n pop.index.name = 'block_id'\n new_df = df.join(pop)\n assert new_df['pop'].notnull().min()\n return new_df", "def conn_reshape_undirected(da, sep='-', order=None, rm_missing=False,\n fill_value=np.nan, to_dataframe=False,\n inplace=False):\n assert isinstance(da, xr.DataArray)\n if not inplace:\n da = da.copy()\n assert 'roi' in list(da.dims)\n if 'times' not in list(da.dims):\n da = da.expand_dims(\"times\")\n\n # get sources, targets names and sorted full list\n sources, targets, roi_tot = _untangle_roi(da, sep)\n\n # build the multiindex and unstack it\n da = xr.concat((da, da), 'roi')\n da, order = _dataarray_unstack(da, sources, targets, roi_tot, fill_value,\n order, rm_missing)\n\n # dataframe conversion\n if to_dataframe:\n da = _dataframe_conversion(da, order)\n\n return da", "def DataFrame(dat):\n keys = dat.keys()\n l = []\n for key in keys:\n v = dat[key]\n assert type(v) is np.ndarray and v.ndim <= 2 and v.ndim >= 1, \\\n '%s must be np.ndarray with 1 <= ndim <= 2 !' % key\n\n if v.ndim == 1:\n ix = pd.MultiIndex.from_product([[key]] + [[0]])\n l.append(pd.DataFrame(v[:,np.newaxis], columns=ix))\n else:\n ix = pd.MultiIndex.from_product([[key]] + [\n np.arange(s) for s in v.shape[1:]\n ])\n l.append(pd.DataFrame(v, columns=ix))\n return pd.concat(l, axis=1)", "def populate_df(df):\n uniques = pd.unique(df.values.ravel('K'))\n zeros = np.zeros(len(uniques))\n\n # main df protection\n df = df.copy(deep=True)\n\n all_dummies = []\n for row in df.itertuples():\n i = 1\n uniques_dic = dict(zip(uniques, zeros))\n while i < 6:\n uniques_dic[row[i]] = 1\n i += 1\n\n all_dummies.append(uniques_dic)\n return pd.DataFrame(all_dummies, columns=uniques)", "def _repivot_dataframe(armscore_df: pd.DataFrame) -> pd.DataFrame:\n\n transform = (\n armscore_df.set_index([\"trial_index\", \"arm_name\", \"metric_name\"])\n .unstack(\"metric_name\")\n .reset_index()\n )\n new_cols = transform.columns.to_flat_index()\n parameters_holder = transform[\n list(filter(lambda x: \"parameters\" in x, new_cols))[0]\n ]\n transform.drop(columns=\"parameters\", level=0, inplace=True)\n new_cols = new_cols.drop(labels=filter(lambda x: \"parameters\" in x, new_cols))\n transform.columns = [\"trial_index\", \"arm_name\"] + [\n \"_\".join(tpl) for tpl in new_cols[2:]\n ]\n transform[\"parameters\"] = parameters_holder\n # pyre-fixme[7]: Expected `DataFrame` but got `Union[DataFrame, Series]`.\n return transform", "def hstack_columns(table, table_other):\n stacked = Table()\n\n for column in table.colnames:\n data = np.hstack([table[column].data[0], table_other[column].data[0]])\n stacked[column] = data[np.newaxis, :]\n return stacked", "def concat_and_sort(self):\n for link in self.to_concat:\n \n to_concat = self.to_concat[link]\n df = pd.concat(to_concat,axis=0)\n df=df.sort_values(by=['day','actualtime_arr_from'])\n for d in df['day'].unique():\n self.data[d][link] = {}\n temp = df[df['day']==d]\n \n for r in 
temp['routeid'].unique(): \n self.data[d][link][r] = temp[temp['routeid']==r][['actualtime_arr_from','actualtime_arr_to','routeid']].values \n del(temp)\n del(df)\n del(self.to_concat)", "def rebuild_indexes(self):\n self.cards = sorted(self.name_to_card.values(), key=lambda card: card.name)\n self.card_sets = sorted(\n self.code_to_card_set.values(), key=lambda cset: cset.release_date\n )\n\n self.set_code_to_printings = collections.defaultdict(list)\n self.card_name_to_printings = collections.defaultdict(list)\n self.set_name_num_mv_to_printings = collections.defaultdict(list)\n\n for printing in self.id_to_printing.values():\n self.set_code_to_printings[printing.set_code].append(printing)\n self.card_name_to_printings[printing.card_name].append(printing)\n # snnm == (set, name, number, multiverseid)\n snnm_index_keys = {\n # pylint: disable=line-too-long\n (\n printing.set_code,\n printing.card_name,\n printing.set_number,\n printing.multiverseid,\n ),\n (printing.set_code, printing.card_name, None, printing.multiverseid),\n (printing.set_code, printing.card_name, printing.set_number, None),\n (printing.set_code, printing.card_name, None, None),\n }\n for key in snnm_index_keys:\n self.set_name_num_mv_to_printings[key].append(printing)\n\n for printings in self.set_code_to_printings.values():\n printings.sort(key=set_code_to_printings_key)\n\n for printings in self.card_name_to_printings.values():\n printings.sort(key=card_name_to_printing_key)\n\n # Build ordered indexes\n self.set_code_to_printing_to_row = {}\n for set_code, printings in self.set_code_to_printings.items():\n self.set_code_to_printing_to_row[set_code] = {\n printing: i for i, printing in enumerate(printings)\n }", "def concat_statistics(statistics_df, summary_df, total_df, sort_columns):\n \n # concatenate statistics dataframes\n statistics_df = pd.concat([statistics_df, summary_df])\n statistics_df.sort_values(by=sort_columns, inplace=True)\n statistics_df = pd.concat([statistics_df, total_df])\n # reset indexes in final statistics DataFrame\n statistics_df.reset_index(inplace=True, drop=True)\n return statistics_df", "def concat_statistics(statistics_df, summary_df, total_df, sort_columns):\n \n # concatenate statistics dataframes\n statistics_df = pd.concat([statistics_df, summary_df])\n statistics_df.sort_values(by=sort_columns, inplace=True)\n statistics_df = pd.concat([statistics_df, total_df])\n # reset indexes in final statistics DataFrame\n statistics_df.reset_index(inplace=True, drop=True)\n return statistics_df", "def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe", "def reshape_data(tbl):\n\n \n # Indexes where the sentece starts\n sentStarters = tbl.loc[tbl['index'] == 'I1']\n\n # Add indicator for group and fill that forward for the group\n tbl.loc[tbl['index'] == 'I1', 'sent'] = range(sentStarters.shape[0])\n tbl['sent'] = tbl['sent'].fillna(method='ffill')\n\n def reshape_recipe(recipe):\n tokens = [token for token in recipe['token']]\n tags = [tag for tag in recipe['tag']]\n return pd.DataFrame({'sents': [tokens], 'tags': [tags]})\n\n return tbl.groupby('sent').apply(reshape_recipe)", "def concat_all_dataframes(*args):\n return reduce(DataFrame.unionAll, args)", "def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n 
date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. 
Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0" ]
[ "0.5980725", "0.5887287", "0.5851793", "0.5812862", "0.5757257", "0.57415146", "0.57319605", "0.5689936", "0.56405115", "0.56170523", "0.56154037", "0.5591578", "0.5586357", "0.5564776", "0.55478466", "0.554424", "0.5535979", "0.54988015", "0.5480869", "0.54719794", "0.5454091", "0.5413194", "0.53967744", "0.53953075", "0.5393822", "0.53816986", "0.53804", "0.5370093", "0.5324112", "0.5324096", "0.53200185", "0.5314272", "0.53004473", "0.5284426", "0.52724266", "0.52572775", "0.5252359", "0.524964", "0.52330816", "0.5229393", "0.52265584", "0.5222535", "0.52006125", "0.5195911", "0.5195562", "0.5193467", "0.5188534", "0.5188419", "0.51554865", "0.5149674", "0.5149438", "0.5113603", "0.51133734", "0.5101574", "0.5099493", "0.50975734", "0.5092474", "0.50828815", "0.50709224", "0.5068727", "0.5068698", "0.50573814", "0.5057295", "0.50569034", "0.5055531", "0.5052348", "0.5039888", "0.5024782", "0.5009993", "0.50093204", "0.5002774", "0.49889696", "0.4987197", "0.4977417", "0.4976633", "0.49741873", "0.49637204", "0.49635044", "0.4953671", "0.49500415", "0.49472237", "0.4944154", "0.4940979", "0.4934478", "0.492235", "0.49221307", "0.4920108", "0.49159628", "0.49041158", "0.4899154", "0.48977518", "0.48944384", "0.48936167", "0.48901168", "0.48823607", "0.48823607", "0.48821995", "0.4881031", "0.48785275", "0.4874625" ]
0.66242844
0
stack pandas Series logically into a DataFrame
def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame: return pd.concat(serieses, axis="columns").T
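A minimal usage sketch of the stacking pattern shown in the document above; the helper name, sample Series, and expected output below are illustrative assumptions, not part of the dataset rows.

from typing import List

import pandas as pd


def stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame:
    # Concatenate column-wise so each Series becomes one column, then
    # transpose so each input Series ends up as one row of the DataFrame.
    return pd.concat(serieses, axis="columns").T


s1 = pd.Series({"a": 1, "b": 2}, name="first")
s2 = pd.Series({"a": 3, "b": 4}, name="second")
print(stack_serieses([s1, s2]))
#         a  b
# first   1  2
# second  3  4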
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0]}\n\n df = pd.DataFrame(d)\n print df\n df = pd.DataFrame(d, index=['a', 'b', 'c', 'd'])\n print df\n\n\n data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]\n df = pd.DataFrame(data2)\n\n print df", "def expand_series(ser, columns):\n return ser.to_frame(columns[0]).reindex(columns=columns).ffill(axis=1)", "def binarize(series):\n name = series.name\n df = pd.DataFrame()\n for category in series.value_counts().index:\n df[category] = (series == category)\n return df", "def stack(self, level, dropna):\n return DataFrameDefault.register(pandas.DataFrame.stack)(\n self, level=level, dropna=dropna\n )", "def sample_series(self, series, append_frame=None):\n\n columns, values = self.get_readings(series)\n\n dataframe = DataFrame(values, columns=columns)\n dataframe = self.format_index(dataframe, self.ENERGY_DB_INDEX)\n\n # https://pandas.pydata.org/pandas-docs/stable/merging.html\n if append_frame is not None:\n # dataframe = pandas.concat([dataframe, input_frame], axis=1, join='inner', join_axes=[input_frame.index])\n dataframe = pandas.merge(append_frame, dataframe, on=['time', 'time'])\n # print(dataframe)\n\n return dataframe", "def pandas_series(arr, nan_to_null=False):\n import pandas as pd\n return pd.Series(arr, copy=False)", "def CombineSeries(*args):\r\n df = pd.concat([*args], axis=1)\r\n\r\n return df", "def to_series(self) -> pd.Series:\n df = self.to_dataframe(\"* values *\")\n dims = self.dims_list\n if len(dims) == 1:\n dims = dims[0]\n return df.set_index(dims)[\"* values *\"]", "def stack_table(A: pd.DataFrame) -> pd.DataFrame:\r\n A = pd.DataFrame(A.stack(dropna=False))\r\n A.columns = ['factor']\r\n return A", "def make_series(x, y, **options):\n underride(options, name='values')\n if isinstance(y, pd.Series):\n y = y.values\n series = pd.Series(y, index=x, **options)\n series.index.name = 'index'\n return series", "def window_stack(df, width=3):\n n = df.shape[0]\n a = np.hstack(list(df.values[(width-1-i):(n-i)] for i in range(0, width)))\n\n times = [ ('t' if not idx else 't-{:d}'.format(idx)) for idx in range(width) ]\n columns = pd.MultiIndex.from_product((times, df.columns), names=('time', 'location'))\n\n return pd.DataFrame(a, index=df.index[width-1:], columns=columns)", "def series_from_dataframe(df, index_column: str, value_column: str=None):\n\n if len(df.columns) > 2:\n df = df[[index_column, value_column]].copy()\n else:\n df = df.copy()\n df.set_index(index_column, inplace=True)\n sr = df.squeeze()\n sr.name = value_column\n return sr", "def date_features(s: pd.Series, result: Optional[pd.DataFrame] = None) -> pd.DataFrame:\n if result is None:\n result = pd.DataFrame(s, copy=False)\n index = cast(pd.DatetimeIndex, s.index)\n\n result[\"year\"] = index.year\n result[\"month\"] = index.month\n result[\"day\"] = index.day\n result[\"dayofweek\"] = index.dayofweek\n result[\"dayofyear\"] = index.dayofyear\n result[\"quarter\"] = index.quarter\n result[\"season\"] = _map(index.month, _SEASON_MAP)\n result[\"weekofyear\"] = index.weekofyear\n try:\n # Work around numpy Deprecation Warning about parsing timezones\n # by converting to UTC and removing the tz info.\n dates = index.tz_convert(None).to_numpy()\n except TypeError:\n # No timezone.\n dates = index.to_numpy()\n 
first_of_month = pd.to_datetime(dates.astype(\"datetime64[M]\"))\n week_of_month = np.ceil((first_of_month.dayofweek + index.day) / 7.0)\n result[\"weekofmonth\"] = week_of_month.astype(int)\n # result[\"is_holiday\"] = ?\n # result[\"holiday_types\"] = ?\n result[\"is_weekend\"] = index.dayofweek >= 5\n result[\"is_leap_year\"] = index.is_leap_year\n result[\"is_leap_day\"] = (index.month == 2) & (index.day == 29)\n result[\"is_month_end\"] = index.is_month_end\n result[\"is_quarter_end\"] = index.is_month_end & (index.month % 4 == 3)\n\n return result", "def to_real_series(self, data: pd.Series) -> pd.Series:\n ...", "def parse_result_series(result):\n if isinstance(result, np.ndarray):\n return result\n\n if result is None or not len(result):\n return None\n\n dates, values = result\n return pd.DataFrame({0:dates.astype(int)/1000,1:values})", "def to_work_series(self, data: pd.Series) -> pd.Series:\n ...", "def as_series(self, arraylike: Iterable) -> pd.Series:\n return pd.Series(arraylike, index=self.data.index)", "def as_series(self) -> \"pd.Series\":\n import pandas as pd\n\n data = {\"_row_id\": self.id, \"_row_num\": self.num, **self.as_dict()}\n series = pd.Series(data)\n return series", "def index_reformat(series: pd.Series, preserve_order: bool) -> pd.DataFrame:\n series = series.copy()\n series = rewrite_index(series)\n series.index = remove_constant_levels(series.index)\n series.index.names = [LEVEL_NAMES.get(name, name) for name in series.index.names]\n series = series.rename(index=pretty_rewrite)\n\n # Preserve order of inputs\n df = series.unstack(\"Target\")\n if preserve_order:\n df = df.reindex(columns=series.index.get_level_values(\"Target\").unique())\n for level in series.index.names:\n kwargs = {}\n if isinstance(df.index, pd.MultiIndex):\n kwargs = dict(level=level)\n if level != \"Target\":\n df = df.reindex(index=series.index.get_level_values(level).unique(), **kwargs)\n else:\n df = df.sort_index()\n return df", "def to_pandas(self, **kwargs) -> pd.Series | pd.DataFrame:\n\n if self.n_items != 1:\n return self.to_dataframe(**kwargs)\n else:\n return self[0].to_pandas(**kwargs)", "def to_pandas_series_rdd(self):\n pd_index = self.index().to_pandas_index()\n return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))", "def _wrap_in_pandas_container(\n data_to_wrap,\n *,\n columns,\n index=None,\n):\n if issparse(data_to_wrap):\n raise ValueError(\"Pandas output does not support sparse data.\")\n\n if callable(columns):\n try:\n columns = columns()\n except Exception:\n columns = None\n\n pd = check_pandas_support(\"Setting output container to 'pandas'\")\n\n if isinstance(data_to_wrap, pd.DataFrame):\n if columns is not None:\n data_to_wrap.columns = columns\n return data_to_wrap\n\n return pd.DataFrame(data_to_wrap, index=index, columns=columns, copy=False)", "def series_view(self, **kwargs): # noqa: PR02\n return SeriesDefault.register(pandas.Series.view)(self, **kwargs)", "def __call__(self, index: pd.Index) -> pd.DataFrame:\n if not isinstance(self.values, (list, tuple)):\n values = pd.Series(\n self.values,\n index=index,\n name=self.value_columns[0] if self.value_columns else None,\n )\n else:\n values = dict(\n zip(self.value_columns, [pd.Series(v, index=index) for v in self.values])\n )\n return pd.DataFrame(values)", "def _convert_df_to_series(df):\n if isinstance(df, pd.DataFrame) and df.shape[1] == 1:\n return df.iloc[:, 0]\n elif isinstance(df, pd.DataFrame) and df.shape[1] > 1:\n raise TypeError('DataFrame cannot be converted to a Series as it 
contains more than 1 column.')\n return df", "def transform_series(obj):\n vals = obj.values\n return transform_array(vals)", "def SweepFrame(*args, **kwargs):\n underride(kwargs, dtype=float)\n return pd.DataFrame(*args, **kwargs)", "def sandwich(self, s):\r\n s_ = pd.DataFrame(s)\r\n index = s_.index\r\n sandwich_ = sandwich(self.data.values, s_.values)\r\n if len(sandwich_.shape) == 0: \r\n sandwich_ = [sandwich_]\r\n sandwich_ = pd.DataFrame(sandwich_, index=index, columns=index)\r\n return self.__class__(sandwich_)", "def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)", "def _timeseries_to_dataframe_value(timeseries, name):\n # Column headers\n columns = [\n [name],\n [timeseries.instance_or_contract_dataframe_column_header()],\n ['']\n ]\n # Convert a time series of (date, value)\n df = pd.DataFrame.from_records(\n ((v.value,) for v in timeseries),\n columns=columns,\n index=[v.date for v in timeseries],\n )\n df.index.name = 'date'\n return df", "def from_series(cls, s: pd.Series, schema: Schema) -> \"Flat\":\n if isinstance(s.index, pd.MultiIndex):\n dims = s.index.names\n else:\n if not s.index.name:\n err_msg = (\n \"Series index does not have a name. Unable to infer dimension.\\n\"\n \"When creating the series, ensure the index has a name:\\n\"\n \"s = pd.Series([1, 2, 3], index=pd.Index(['S', 'M', 'L'], name='size'))\"\n )\n raise TypeError(err_msg)\n dims = [s.index.name]\n df = s.to_frame(\"* value *\").reset_index()\n return cls.from_dataframe(df, schema, dims, \"* value *\")", "def to_series(func):\n\n @wraps(func)\n def add_series(center, home_center):\n normed_center = func(center.x, center.y, home_center)\n return pd.Series(normed_center, index=[\"x_normed\", \"y_normed\"])\n\n return add_series", "def leftshift_series(series):\n leftshifted_x = [(series.index[idx] - series.index[0]).days for idx in range(len(series.index))]\n return pd.Series(data=series.values, index=leftshifted_x)", "def object_values_series() -> pd.Series:\n series = pd.Series(data=list(string.ascii_uppercase), index=range(101,127))\n return series", "def as_dataframe(self) -> \"pd.DataFrame\":\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df", "def _timeseries_to_dataframe_scenarios(timeseries, name):\n width = timeseries.total_values_per_item()\n # Column headers\n columns = [\n [name] * width,\n [timeseries.instance_or_contract_dataframe_column_header()] * width,\n timeseries.scenario_names\n ]\n # Convert a time series of (date, scenarios[])\n df = pd.DataFrame.from_records(\n (v.scenarios for v in timeseries.data),\n columns=columns,\n index=[v.date for v in timeseries],\n )\n df.index.name = 'date'\n return df", "def series_to_supervised(self,data, n_in=1, n_out=1, dropnan=True): \n n_vars = 1 if type(data) is list else data.shape[1]\n df = pd.DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n # forecast sequence (t, t+1, ... 
t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n # put it all together\n agg = pd.concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n self.agg=agg\n return agg", "def _timeseries_to_dataframe_mean_and_scenarios(timeseries, name):\n width = timeseries.total_values_per_item()\n # Column headers\n columns = [\n [name] * width,\n [timeseries.instance_or_contract_dataframe_column_header()] * width,\n [''] + timeseries.scenario_names\n ]\n # Convert a time series of (date, scenarios[])\n df = pd.DataFrame.from_records(\n ((v.value, *v.scenarios) for v in timeseries.data),\n columns=columns,\n index=[v.date for v in timeseries],\n )\n df.index.name = 'date'\n return df", "def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n from pyspark.pandas import DataFrame\n\n groupby = self._groupby\n psdf = groupby._psdf\n\n # Here we need to include grouped key as an index, and shift previous index.\n # [index_column0, index_column1] -> [grouped key, index_column0, index_column1]\n new_index_scols: List[Column] = []\n new_index_spark_column_names = []\n new_index_names = []\n new_index_fields = []\n for groupkey in groupby._groupkeys:\n index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))\n new_index_scols.append(groupkey.spark.column.alias(index_column_name))\n new_index_spark_column_names.append(index_column_name)\n new_index_names.append(groupkey._column_label)\n new_index_fields.append(groupkey._internal.data_fields[0].copy(name=index_column_name))\n\n for new_index_scol, index_name, index_field in zip(\n psdf._internal.index_spark_columns,\n psdf._internal.index_names,\n psdf._internal.index_fields,\n ):\n index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))\n new_index_scols.append(new_index_scol.alias(index_column_name))\n new_index_spark_column_names.append(index_column_name)\n new_index_names.append(index_name)\n new_index_fields.append(index_field.copy(name=index_column_name))\n\n if groupby._agg_columns_selected:\n agg_columns = groupby._agg_columns\n else:\n # pandas doesn't keep the groupkey as a column from 1.3 for DataFrameGroupBy\n column_labels_to_exclude = groupby._column_labels_to_exclude.copy()\n if isinstance(groupby, DataFrameGroupBy):\n for groupkey in groupby._groupkeys: # type: ignore[attr-defined]\n column_labels_to_exclude.add(groupkey._internal.column_labels[0])\n agg_columns = [\n psdf._psser_for(label)\n for label in psdf._internal.column_labels\n if label not in column_labels_to_exclude\n ]\n\n applied = []\n for agg_column in agg_columns:\n applied.append(agg_column._with_new_scol(func(agg_column.spark.column))) # TODO: dtype?\n\n # Seems like pandas filters out when grouped key is NA.\n cond = groupby._groupkeys[0].spark.column.isNotNull()\n for c in groupby._groupkeys[1:]:\n cond = cond | c.spark.column.isNotNull()\n\n sdf = psdf._internal.spark_frame.filter(cond).select(\n new_index_scols + [c.spark.column for c in applied]\n )\n\n internal = psdf._internal.copy(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in new_index_spark_column_names],\n index_names=new_index_names,\n index_fields=new_index_fields,\n column_labels=[c._column_label for c in applied],\n data_spark_columns=[\n scol_for(sdf, c._internal.data_spark_column_names[0]) for c in applied\n ],\n 
data_fields=[c._internal.data_fields[0] for c in applied],\n )\n\n return groupby._handle_output(DataFrame(internal))", "def to_scalar_df(df: pd.DataFrame) -> pd.DataFrame:\n scalar_df = df\n column_ordering = []\n for c, s in df.items():\n if s.dtype == \"object\":\n s_list = s.to_list()\n try:\n ncols = s_list[0].shape[0]\n split_cols = [f\"{c}_{k}\" for k in range(ncols)]\n sdf = pd.DataFrame(s_list, columns=split_cols)\n scalar_df = pd.concat([scalar_df, sdf], axis=1)\n column_ordering += split_cols\n except AttributeError as e:\n raise ValueError(f\"Expected series of lists, but found {s_list[0]}\") from e\n else:\n column_ordering.append(c)\n return scalar_df[column_ordering]", "def basic_series() -> pd.Series:\n series = pd.Series(range(1,6), name=\"Fred\")\n return series", "def prepareDataframeForPivot(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n if self._isIndexedDataframe(df):\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.select_dtypes(include=['float64', 'int64'])\n if df.size == 0:\n df[\"values\"] = np.nan\n # try to keep group measures\n try:\n df.groupMeasures = result.groupMeasures\n except:\n pass\n # try to keep aggMeasures\n try:\n df.aggMeasures = result.aggMeasures\n except:\n pass\n\n return df", "def series_to_supervised(data, n_in=1, delta_in=1, n_out=1,delta_out=1, dropnan=True):\n \n if (type(data) is list) or (type(data) is pd.Series):\n n_vars = 1\n else:\n n_vars =data.shape[1]\n df = pd.DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -delta_in):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n # forecast sequence (t, t+1, ... t+n)\n for i in range(0, n_out, delta_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n # put it all together\n agg = pd.concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n return agg", "def process_and_merge(s):\n l = [preprocessing(df) for df in s]\n d = {x.name: x for x in l}\n df = pd.DataFrame(d)\n df.index.names = [x.lower() for x in df.index.names]\n return pd.DataFrame(d)", "def create_shifted_df(df: pd.DataFrame, periods: int = 1) -> pd.DataFrame:\n data_df_shifted = df.shift(periods=periods)\n data_df_shifted = data_df_shifted.combine_first(df).add_suffix(\"_shifted\")\n return pd.concat([df, data_df_shifted], axis=1, join=\"inner\").reset_index(\n drop=True\n )", "def datetime_features(\n s: pd.Series, result: Optional[pd.DataFrame] = None\n) -> pd.DataFrame:\n result = date_features(s, result)\n return time_features(s, result)", "def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\n n_vars = 1 if type(data) is list else data.shape[1]\n df = pd.DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n # forecast sequence (t, t+1, ... 
t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n # put it all together\n agg = pd.concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n\n return agg", "def prepare_arrays(series: pd.Series) -> np.array:\n\n series = series.map(string_to_array)\n\n # transform the array of array into a 2d-array\n return np.stack(np.array(series.array))", "def _metrics_to_series(metrics) -> pd.Series:\n row = pd.Series(metrics[\"latency\"])\n for index, value in row.items():\n row[index] = value / 1000000.0\n\n row[\"qps\"] = metrics[\"qps\"]\n row[\"completed_queries\"] = metrics[\"completed_queries\"]\n row[\"failed_queries\"] = metrics[\"failed_queries\"]\n row[\"scenario\"] = metrics[\"scenario\"]\n\n if \"actual_qps\" in metrics:\n row[\"actual_qps\"] = metrics[\"actual_qps\"]\n\n return row", "def dictToSeries(df : pd.DataFrame, column : str):\n # Convert those columns with dictionaries in each row to pd.Series\n df = pd.concat([df, df[column].apply(pd.Series)], axis=1)\n df = df.drop(column, axis=1)\n\n return df", "def broadcast_merge(s,df):\n \n return pd.merge(pd.DataFrame(data=[s.values]*len(df),\n columns=s.index,\n index=df.index),\n df, left_index=True, right_index=True)", "def float_series() -> pd.Series:\n series = pd.Series([(n/1000) for n in range(1001)])\n return series", "def series_to_supervised(data, n_in=1, delta_in=1, n_out=1,delta_out=1, dropnan=True):\n\n if (type(data) is list) or (type(data) is pd.Series):\n n_vars = 1\n else:\n n_vars =data.shape[1]\n df = pd.DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -delta_in):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n # forecast sequence (t, t+1, ... t+n)\n for i in range(0, n_out, delta_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n # put it all together\n agg = pd.concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n return agg.dropna()\n else:\n return agg", "def get_ts_df( N):\n df = pd.DataFrame()\n df['value'] = N.reshape(-1)\n df['time'] = list(range( N.shape[1])) * N.shape[0]\n df['unit'] = np.repeat( range( N.shape[0]), N.shape[1])\n return df", "def to_df(self):\n if self.shape > 1:\n range_str = [s for s in range(self.shape)]\n iterables = [self.columns, range_str]\n multiindex = pd.MultiIndex.from_product(iterables, names=['song', 'frame'])\n # multiindex = [i for i in itertools.product(self.columns, range_str, repeat=1)]\n df = pd.DataFrame(columns=multiindex, index=self.columns, dtype=np.float64)\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n for s in range_str:\n df.loc[c_1][c_2, s] = self.dict_[c_1][c_2][s]\n df = df.T\n else:\n df = pd.DataFrame(columns=self.columns + ['song'], dtype=np.float64)\n df['song'] = self.columns\n df = df.set_index('song')\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n df.loc[c_1, c_2] = self.max_diff(c_1, c_2)\n\n return df", "def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\n n_vars = 1 if type(data) is list else data.shape[1]\n df = pd.DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... 
t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n # forecast sequence (t, t+1, ... t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n # put it all together\n agg = pd.concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n return agg", "def stack(Timeseries, remove_ramp='linear', signalmask='mask.npy'):\n\n self = Timeseries\n datatype = np.dtype('<f4')\n width = self.Set.Width\n length = self.Set.Length\n\n cumTime = np.zeros((length,width), dtype=datatype)\n cumDef = np.zeros((length,width), dtype=datatype)\n\n if signalmask:\n signal = np.load(signalmask)\n\n for ig in self.Set:\n data = np.load(ig.ProcName)\n indGood = -np.isnan(data) # Assumes no special mask (e.g. coherence etc.)\n\n if remove_ramp != None:\n ramp = roipy.tools.calc_ramp(data, 'linear', custom_mask=signal)\n # Save a copy\n outdata = ig.ProcName.replace('d_', 'ramp_')\n if not os.path.exists(outdata):\n np.save(outdata, ramp.data) #mask array\n\n data_r = data-ramp\n outname = ig.ProcName.replace('d_', 'rd_')\n if not os.path.exists(outname):\n np.save(outname, data_r.data) #mask array\n\n cumTime[indGood] += float(ig.Timespan) #uplift positive\n cumDef[indGood] += data_r[indGood]\n\n #stack = stack * (5.62/4*np.pi) #done already in load_interferograms_binary\n stack = cumDef / cumTime\n\n return stack, cumDef, cumTime", "def array_1d_to_df(vel_array, dt, ss_code, ss_config, blank, bin_size, first_ens_num, last_ens_num):\n\n # Dictionary to create dataframe\n # Faster than appending to a dataframe\n dict_result = {}\n\n # A counter to use to add entries to dict\n i = 0\n\n if vel_array:\n # Go through each bin and beam\n for bin_num in range(len(vel_array)):\n # Get the bin depth\n bin_depth = Ensemble.get_bin_depth(blank, bin_size, bin_num)\n\n # Get the value\n value = vel_array[bin_num]\n\n # Create a dict entry\n dict_result[i] = {'time_stamp': dt,\n 'ss_code': ss_code,\n 'ss_config': ss_config,\n 'bin_num': bin_num,\n 'beam_num': 0,\n 'bin_depth': bin_depth,\n 'first_ens_num': first_ens_num,\n 'last_ens_num': last_ens_num,\n 'value': value}\n\n # Increment index\n i = i + 1\n\n # Create the dataframe from the dictionary\n # important to set the 'orient' parameter to \"index\" to make the keys as rows\n df = pd.DataFrame.from_dict(dict_result, \"index\")\n\n return df", "def allocate_series_dataframes(network, series):\n for component, attributes in series.items():\n df = network.df(component)\n pnl = network.pnl(component)\n\n for attr in attributes:\n pnl[attr] = pnl[attr].reindex(\n columns=df.index,\n fill_value=network.components[component][\"attrs\"].at[attr, \"default\"],\n )", "def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def _wrap_result(data, columns, index_col=None, coerce_float=True,\n parse_dates=None):\n\n frame = DataFrame.from_records(data, columns=columns,\n coerce_float=coerce_float)\n\n _parse_date_columns(frame, parse_dates)\n\n if index_col is not None:\n frame.set_index(index_col, inplace=True)\n\n return frame", "def _stack_dataframes(dataframes: List[pd.DataFrame]) -> pd.DataFrame:\n 
return pd.concat(dataframes).reset_index(drop=True)", "def pack_result_data(mean, upper, lower, x):\n if len(upper) == 0 and len(lower) == 0:\n upper = mean\n lower = mean\n d = {\"mean\": mean, \"upper\": upper, \"lower\": lower, \"x\": x}\n return pd.DataFrame(data=d)", "def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)", "def get_dataset_from_series(dataset_pproc, n_hours):\n # obtenemos los valores como una matriz\n values = dataset_pproc.values\n # ensure all data is float\n values = values.astype(\"float32\")\n # normalize features\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaled = scaler.fit_transform(values)\n scaled = pd.DataFrame(\n scaled, columns=dataset_pproc.columns\n ) # frame as supervised learning\n\n reframed = series_to_supervised(scaled, n_hours, 1)\n return reframed, scaler", "def timeseries_list_to_dataframe(timeseries_list):\n # Checks\n assert_pandas_installed()\n assert timeseries_list, \"timeseries list is empty\"\n for index, timeseries in enumerate(timeseries_list):\n assert isinstance(timeseries, _get_timeseries_class()), (\n f\"timeseries_list[{index}] must be an instance of \"\n f\"energyquantified.data.Timeseries, but was: {type(timeseries)}\"\n )\n # Merge into one data frame\n return pd.concat([ts.to_dataframe() for ts in timeseries_list], axis=1)", "def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\n\tn_vars = 1 if type(data) is list else data.shape[1]\n\tdf = DataFrame(data)\n\tcols, names = list(), list()\n\t# input sequence (t-n, ... t-1)\n\tfor i in range(n_in, 0, -1):\n\t\tcols.append(df.shift(i))\n\t\tnames += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n\t# forecast sequence (t, t+1, ... t+n)\n\tfor i in range(0, n_out):\n\t\tcols.append(df.shift(-i))\n\t\tif i == 0:\n\t\t\tnames += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n\t\telse:\n\t\t\tnames += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n\t# put it all together\n\tagg = concat(cols, axis=1)\n\tagg.columns = names\n\t# drop rows with NaN values\n\tif dropnan:\n\t\tagg.dropna(inplace=True)\n\treturn agg", "def SweepSeries(*args, **kwargs):\n if args or kwargs:\n underride(kwargs, dtype=float)\n series = pd.Series(*args, **kwargs)\n else:\n series = pd.Series([], dtype=np.float64)\n\n series.index.name = 'Parameter'\n if 'name' not in kwargs:\n series.name = 'Metric'\n return series", "def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\n n_vars = 1 if type(data) is list else data.shape[1]\n df = data\n cols, names = list(), list()\n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [\"{}_t-{}\".format(var_name, i) for var_name in df.columns]\n\n # forecast sequence (t, t+1, ... 
t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [\"{}_t\".format(var_name) for var_name in df.columns]\n else:\n names += [\"{}_t+{}\".format(var_name, i) for var_name in df.columns]\n # put it all together\n agg = pd.concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n return agg", "def _dataframe_conversion(da, order):\n assert da.data.squeeze().ndim == 2, (\n \"Dataframe conversion only possible for connectivity arrays when \"\n \"time dimension is missing\")\n da = da.squeeze().to_dataframe('mi').reset_index()\n da = da.pivot('sources', 'targets', 'mi')\n if isinstance(order, (list, np.ndarray)):\n da = da.reindex(order, axis='index').reindex(order, axis='columns')\n\n return da", "def reshape_data(tbl):\n\n \n # Indexes where the sentece starts\n sentStarters = tbl.loc[tbl['index'] == 'I1']\n\n # Add indicator for group and fill that forward for the group\n tbl.loc[tbl['index'] == 'I1', 'sent'] = range(sentStarters.shape[0])\n tbl['sent'] = tbl['sent'].fillna(method='ffill')\n\n def reshape_recipe(recipe):\n tokens = [token for token in recipe['token']]\n tags = [tag for tag in recipe['tag']]\n return pd.DataFrame({'sents': [tokens], 'tags': [tags]})\n\n return tbl.groupby('sent').apply(reshape_recipe)", "def stack_stats_tbl(arrs, nodata=None): # col_names, args):\r\n stats = stack_stats(arrs, ax=(1, 2), nodata=nodata)\r\n d = [(i, '<f8')\r\n for i in ['Sum', 'Min', 'Mean', 'Med', 'Max', 'Std', 'Var']]\r\n dts = [('Band', '<i4'), ('N', '<i4'), ('N_nan', '<i4')] + d\r\n N, r, c = arrs.shape\r\n cols = len(dts)\r\n z = np.empty(shape=(N,), dtype=dts)\r\n z[z.dtype.names[0]] = np.arange(0, N)\r\n z[z.dtype.names[1]] = np.array([r*c]*N)\r\n z[z.dtype.names[2]] = np.count_nonzero(arrs == nodata, axis=(1, 2))\r\n for i in range(cols-3):\r\n z[z.dtype.names[i+3]] = stats[i]\r\n return z", "def tuples_to_df(ts):\n return pd.DataFrame.from_records(ts, columns=['user', 'item', 'rating'])", "def series_to_supervised(data, n_in=1, n_out=1, dropnan=True, stride=None, dates=False, leaks=True):\n df = pd.DataFrame(data)\n \n time = None\n if 'date' in df.columns:\n time = 'date'\n elif 'time' in df.columns:\n time = 'time'\n if time != None:\n df = df.drop([time], axis=1)\n \n if 'leak' in df.columns:\n df = df.drop(['leak'], axis=1) \n n_vars = df.shape[1]\n times_column = list()\n if dates and time != None:\n times_column = data[time]\n del data\n \n cols, names, pivots = list(), list(), list()\n \n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n\t# forecast sequence (t, t+1, ... 
t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n\t# put it all together\n agg = pd.concat(cols, axis=1)\n \n agg.columns = names\n\n #stride - delete windows\n if stride != None:\n indexes_to_drop = list()\n for i in range(stride, agg.shape[0], stride):\n print(\"index\", i)\n pivots += [i]\n \n onset = 0\n offset = pivots[0]\n for i in range(0, len(pivots)):\n print(\"onset\", onset)\n print(\"offset\", offset)\n to_drop = [ x for x in range(onset,offset)]\n indexes_to_drop += to_drop\n try:\n onset = pivots[i] + 1\n offset = pivots[i+1]\n \n except IndexError:\n onset = pivots[i] + 1\n offset = agg.shape[0]\n to_drop = [ x for x in range(onset,offset)]\n indexes_to_drop += to_drop\n \n \n \n print(\"indexes_to_drop\", indexes_to_drop)\n \n agg.drop(df.index[indexes_to_drop], inplace=True)\n \"\"\"\n if dates and time!=None:\n agg[time] = times_column\n \"\"\" \n # drop rows with NaN values \n if dropnan:\n agg.dropna(inplace=True)\n \n\n return agg", "def to_pandas(df):\n pd_df = pd.concat(ray.get(df._df))\n pd_df.index = df.index\n pd_df.columns = df.columns\n return pd_df", "def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n pass", "def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n pass", "def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df", "def get_series(gval, series):\n minlen = min([len(d[series]) for f, d in gval])\n return np.stack([d[series][:minlen] for f, d in gval])", "def generate_store_mmp_series(self, sgl_or_dbl_or_both='single', apply_pre_filter=False):\n\n series_data = []\n\n if sgl_or_dbl_or_both == 'single' or sgl_or_dbl_or_both == 'both':\n series_data_sgl = [(a, b, c, d, e, f) for a, b, c, d, e, f in\n self._iterator_mmp_series_numeric(apply_pre_filter=apply_pre_filter)]\n series_data.extend(series_data_sgl)\n # print series_data\n\n if sgl_or_dbl_or_both == 'double' or sgl_or_dbl_or_both == 'both':\n series_data_dbl = [(a, b, c, d, e, f) for a, b, c, d, e, f in\n self._iterator_mmp_series_numeric(sgl_or_dbl='double',\n apply_pre_filter=apply_pre_filter)]\n series_data.extend(series_data_dbl)\n # print series_data\n\n self.series_df = pd.DataFrame(series_data,\n columns=['SERIES_ID', 'SERIES_SEQ_ID',\n 'CONTEXT_ID', 'FRAG_ID',\n 'MOL_ID', 'ACTIVITY']\n )\n # self.series_df.set_index(['SERIES_ID', 'FRAG_ID'], inplace=True)\n\n # print('Parsed series CSV to dataframe of size %d, %d' % (self.series_df.shape[0], self.series_df.shape[1]))\n self.logger.info('Parsed series CSV to dataframe of size %d, %d' %\n (self.series_df.shape[0], self.series_df.shape[1]))", "def series_to_supervised(data, n_in=1, n_out=1, dropNaN=True):\n\tn_vars = 1 if type(data) is list else data.shape[1]\n\tdf = DataFrame(data)\n\tcols, names = list(), list()\n\n\t# input sequence (t-n ... ... 
t-1)\n\tfor i in range(n_in, 0, -1):\n\t\tcols.append(df.shift(i))\n\t\tnames += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n\n\t#forecast sequence (t, t+1, ... ... t+n)\n\tfor i in range(0, n_out):\n\t\tcols.append(df.shift(-i))\n\t\tif i == 0:\n\t\t\tnames += [(\"var%d(t)\" % (j+1)) for j in range(n_vars)]\n\t\telse:\n\t\t\tnames += [(\"var%d(t+%d)\" % (j+1, i)) for j in range(n_vars)]\n\n\t# put everything together\n\tagg = concat(cols, axis=1)\n\tagg.columns = names\n\n\t# drop rows with NaN values\n\tif dropNaN:\n\t\tagg.dropna(inplace=True)\n\treturn agg", "def pairs_to_df(pairs, columns=['source', 'target']):\n\n edges = pd.DataFrame(data=pairs, columns=columns)\n return edges", "def vstack(A, *B):\n A, *B = A.unify(*B, fmt='dense')\n return DomainMatrix.from_rep(A.rep.vstack(*(Bk.rep for Bk in B)))", "def _get_column_as_pandas_series(self, key):\n result = self.getitem_array([key]).to_pandas().squeeze(axis=1)\n if not isinstance(result, pandas.Series):\n raise RuntimeError(\n f\"Expected getting column {key} to give \"\n + f\"pandas.Series, but instead got {type(result)}\"\n )\n return result", "def store_predictions(df):\n ts = df[df.columns[1]]\n base = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted '+ df.columns[1])\n base.index = df['year']\n base = base.append(pd.DataFrame(preds), sort = True)\n for col in df.columns[2:]:\n ts = df[col]\n temp = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted ' + col)\n temp.index = df['year']\n temp = temp.append(pd.DataFrame(preds), sort = True)\n base = base.join(temp)\n return base", "def _xml_to_series(self, xml):\n d = self._xml_to_dict(xml)\n d = d['Data']\n if isinstance(d, basestring):\n raise EmptyXMLError('Provided xml has no content.')\n shows = d['Series'] #list of dicts\n if self.get_first:\n if isinstance(shows, list):\n return Series(**shows[0])\n else:\n return Series(**shows)\n else:\n if isinstance(shows, list): \n return [Series(**v) for v in shows]\n else:\n return [Series(**shows)]", "def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:", "def get_products_for_train(s: pd.Series) -> np.ndarray:\n return s.notnull().values", "def prepare_ts(s):\n s = s.rename_axis('ds')\n out = s.rename('y').reset_index()\n return out", "def convert_to_df(data):\r\n ans = pd.DataFrame(data)\r\n return ans", "def xyz_triple_as_pandas_df(numpy_xyz):\n\t\t\n\tindx=np.arange(0, numpy_xyz.shape[0])\n\txyz_df = pd.DataFrame(numpy_xyz, index=indx)\n\txyz_df.columns = ['x','y','z']\n\n\treturn xyz_df", "def _tree_getitem(cls, op):\n out_series = op.outputs[0]\n combine_size = options.combine_size\n chunks = op.inputs[0].chunks\n while len(chunks) > combine_size:\n new_chunks = []\n for i in range(0, len(chunks), combine_size):\n chks = chunks[i : i + combine_size]\n if len(chks) == 1:\n chk = chks[0]\n else:\n concat_op = DataFrameConcat(output_types=[OutputType.series])\n chk = concat_op.new_chunk(chks, dtype=chks[0].dtype)\n chk_op = SeriesIndex(labels=op.labels, is_intermediate=True)\n kw = {\"name\": out_series.name} if hasattr(out_series, \"name\") else {}\n chk = chk_op.new_chunk(\n [chk],\n shape=(np.nan,),\n dtype=chk.dtype,\n index_value=parse_index(pd.RangeIndex(-1)),\n **kw,\n )\n new_chunks.append(chk)\n chunks = new_chunks\n\n concat_op = DataFrameConcat(output_types=[OutputType.series])\n kw = {\"name\": out_series.name} if hasattr(out_series, \"name\") else {}\n kw[\"index\"] = (0,)\n chk = concat_op.new_chunk(chunks, dtype=chunks[0].dtype, **kw)\n index_op = SeriesIndex(labels=op.labels)\n chunk = 
index_op.new_chunk([chk], dtype=chk.dtype, **kw)\n new_op = op.copy()\n nsplits = ((len(op.labels),),) if isinstance(op.labels, list) else ()\n kw = out_series.params\n kw[\"nsplits\"] = nsplits\n kw[\"chunks\"] = [chunk]\n return new_op.new_tileables(op.inputs, kws=[kw])", "def get_ts_df(self):\n df = pd.DataFrame(self.ts_list)\n df.columns = self.col_names\n df.sort_values(by=self.col_names[0], inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n return df", "def from_stack(cls, data_stack, index, columns, values, rows=10000000,\r\n kind='upper'):\r\n cov = segmented_pivot_table(data_stack, rows=rows, index=index,\r\n columns=columns, values=values)\r\n if kind == 'all':\r\n return cls(cov)\r\n else:\r\n return triu_matrix(cov, kind=kind)", "def listify(array, valuename, colnames):\n multiindex = pd.MultiIndex.from_product([range(i) for i in array.shape])\n colmapping = {\"level_\"+str(i): colnames[i] for i in range(len(colnames))}\n\n return pd.DataFrame({valuename: pd.Series(index=multiindex, data=array.flatten())}).reset_index().rename(colmapping, axis=1)", "def data_pandas(detections):\n return DataWrapperPandas(detections, duplicates_radius=1)", "def stack(arrs):\n\treturn np.concatenate([a[...,np.newaxis] for a in arrs], axis=-1)", "def sif_to_series(sif_fl):\n sif_df = pd.read_csv(sif_fl, sep='\\t', header=None, index_col=0)\n uniq_producers = np.unique(sif_df.index)\n sif_series = pd.Series({producer: safelist(sif_df.ix[producer][2])\n for producer in uniq_producers})\n return sif_series", "def timeseries_dataframe(self):\n return" ]
[ "0.6693489", "0.64312357", "0.6289627", "0.61618704", "0.59844136", "0.59790695", "0.5950793", "0.5896647", "0.58869344", "0.58781815", "0.577991", "0.5733921", "0.56654763", "0.566396", "0.56531364", "0.56504464", "0.5612756", "0.56065685", "0.5595002", "0.5558159", "0.55369985", "0.55344975", "0.5499257", "0.54340345", "0.54299086", "0.5423657", "0.53765875", "0.53391176", "0.5338096", "0.533013", "0.5301024", "0.5293915", "0.52902526", "0.52895826", "0.52688056", "0.52546424", "0.5249879", "0.52488256", "0.524799", "0.52334917", "0.52287716", "0.5187648", "0.5156381", "0.5151598", "0.5150891", "0.51488274", "0.5146934", "0.5141803", "0.5123723", "0.5118352", "0.51181775", "0.5114691", "0.51116306", "0.50982445", "0.50954956", "0.50951713", "0.5094254", "0.5089113", "0.5081887", "0.5081631", "0.5071105", "0.5068193", "0.5054304", "0.50542414", "0.5050597", "0.5048978", "0.5040475", "0.50259554", "0.50222415", "0.50080407", "0.50019306", "0.49969485", "0.49906358", "0.4986488", "0.4978209", "0.49767178", "0.4975397", "0.4975397", "0.49712104", "0.49692568", "0.49640006", "0.49602753", "0.4959746", "0.49593335", "0.49591553", "0.49464673", "0.49296448", "0.49208897", "0.49204636", "0.4917442", "0.4915198", "0.4909452", "0.4893414", "0.48919272", "0.48866165", "0.4886315", "0.4885273", "0.48829097", "0.48812792", "0.4879416" ]
0.76580703
0
Process a full set of images, with parallelization if multiple CPU threads are available on this machine
def _process_images( raw_image_paths: pd.Series, raw_images_dir: str, ROI_definitions: Dict[str, Tuple], flat_field_filepath_or_none: Union[str, None], save_ROIs: bool, save_dark_frame_corrected_images: bool, save_flat_field_corrected_images: bool, ) -> Tuple[pd.DataFrame, pd.DataFrame]: def _process_image_local(raw_image_path): """ Version of process_image with all of the local configuration variables packed in. Also encapsulates the opening of the image. """ return process_image( original_rgb_image=raw.open.as_rgb(raw_image_path), original_image_filepath=raw_image_path, raw_images_dir=raw_images_dir, ROI_definitions=ROI_definitions, flat_field_filepath_or_none=flat_field_filepath_or_none, save_ROIs=save_ROIs, save_dark_frame_corrected_image=save_dark_frame_corrected_images, save_flat_field_corrected_image=save_flat_field_corrected_images, ) with ThreadPoolExecutor() as executor: # We want identical warnings to be shown only for the first image they occur on (the default), # but we also want subsequent calls to process_experiment to start with a fresh warning store # so that warnings don't stop showing after the first run. # catch_warnings gives us this fresh warning store. with warnings.catch_warnings(): # process_image returns roi_summary_data df, image_diagnostics df -> this will be a list of 2-tuples roi_summary_data_and_image_diagnostics_dfs_for_files = list( tqdm( executor.map(_process_image_local, raw_image_paths), total=len(raw_image_paths), ) ) roi_summary_data_for_files, image_diagnostics_for_files = zip( *roi_summary_data_and_image_diagnostics_dfs_for_files ) roi_summary_data_for_all_files = _stack_dataframes(roi_summary_data_for_files) image_diagnostics_for_all_files = _stack_serieses(image_diagnostics_for_files) return roi_summary_data_for_all_files, image_diagnostics_for_all_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scan_images_multiprocessed(images, clf, processes, vstep=15, hstep=15, dnum=5):\n pool = Pool(processes=processes) # start 4 worker processes\n results = []\n for i in range(0, processes):\n begin = i * int(len(images) / processes)\n if i == processes - 1:\n end = len(images)\n else:\n end = (i + 1) * int(len(images) / processes)\n results.append(pool.apply_async(scan_images, (images[begin:end], clf, begin, vstep, hstep, dnum)))\n detections = []\n for result in results:\n detections.append(result.get())\n return np.concatenate(detections)", "def process_images(pool, func, images, entries):\n start = time.perf_counter()\n images = pool.map(func, images)\n logger.info(\"Erased white background from %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def _process_image_files(name, cnts, roots, num_shards): \n \n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, sum(cnts), FLAGS.num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (thread_index, ranges, name, cnts, roots, num_shards)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' %\n (datetime.now(), sum(cnts)))\n sys.stdout.flush()", "def load_images_test():\n\n path = os.path.join('./test','*.jpg')\n files = glob.glob(path)\n\n x_test = []\n x_test_id = []\n x_test_shape = []\n pool = multiprocessing.Pool(processes=8)\n for fl in files:\n print(fl)\n flbase = os.path.basename(fl)\n img = cv2.imread(fl, cv2.IMREAD_COLOR)\n img = cv2.imread(fl, cv2.IMREAD_COLOR)\n result_list = pool.map(process_image, [fl])\n x_test.append(result_list[0])\n x_test_id.append(flbase)\n #cv2.imshow(\"dst\", dst2)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n pool.close()\n return x_test, x_test_id", "def load_images_train():\n\n global pool\n x_train = []\n x_train_id = []\n y_train = []\n x_shape = []\n start_time = time.time()\n\n print(\"Reading train images\")\n folders = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']\n #folders = ['new']\n for fld in folders:\n index = folders.index(fld)\n print('Loading folder {} (Index: {})'.format(fld, index))\n path = os.path.join('./train1', fld, '*.jpg')\n files = glob.glob(path)\n pool = multiprocessing.Pool(processes=8)\n for fl in files:\n flbase = os.path.basename(fl)\n img = cv2.imread(fl,cv2.IMREAD_COLOR)\n result_list = pool.map(process_image, [fl])\n x_train.append(result_list[0])\n x_train_id.append(flbase)\n y_train.append(index)\n #x_shape.append(shape)\n\n print('Read train data time: {} seconds'.format(round(time.time() - start_time, 2)))\n pool.close()\n return x_train, y_train, x_train_id", "def process_images(self, images, mode='train'):\n if self.dataset_name == 'dataset1':\n return images[:5000]\n elif self.dataset_name == 'dataset2':\n return np.add(images, 745)\n elif self.dataset_name == 'dataset3':\n # concatenate three images into three-digit image\n if mode == 
'train':\n return np.concatenate((images[:40000], images[10000:50000],\n images[20000:60000]), axis=1)\n elif mode == 'test':\n return np.concatenate((images[:8000], images[1000:9000],\n images[2000:10000]), axis=1)\n elif self.dataset_name == 'dataset4':\n # merge two images into one\n if mode == 'train':\n return images[:50000] + images[-50000:]\n elif mode == 'test':\n return images[:9000] + images[-9000:]\n else:\n return images", "def classify_all_images(cc):\n print 'Classify images'\n images = cc.d.images\n for img_idx in range(comm_rank, len(images), comm_size): # PARALLEL\n print 'classify image %d/%d at %d'%(img_idx/comm_size, len(images)/comm_size, comm_rank)\n img = images[img_idx]\n scores = classify_image(cc, img_idx)\n savefile = config.get_classifier_score_name(img, cc.L)\n cPickle.dump(scores, open(savefile,'w'))", "async def extractimages(self, ctx):\n if self.extract_images_running:\n await ctx.send(inline('Extract images already running'))\n return\n\n event_loop = asyncio.get_event_loop()\n running_load = event_loop.run_in_executor(self.executor, self.do_extract_images)\n\n self.extract_images_running = True\n await ctx.send(inline('Running image extract pipeline: this could take a while'))\n await running_load\n self.extract_images_running = False\n await ctx.send(inline('Image extract finished'))", "def process_bounded_image_files(name, filenames, labels, num_shards, num_threads, output_dir):\n assert len(filenames) == len(labels)\n\n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, len(filenames), num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a generic TensorFlow-based utility for converting all image codings.\n coder = BoundingImageCoder()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (coder, thread_index, ranges, name, filenames,\n labels, num_shards, output_dir)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' 
%\n (datetime.now(), len(filenames)))\n sys.stdout.flush()", "def process():\n config = read_config()\n \n\n img_dir = config['DEFAULT']['images_directory']\n results_dict = {}\n images = list(get_image_files(img_dir))\n for image in tqdm.tqdm(images):\n info = hash_file(image)\n if info == 0:\n continue\n\n hash_value = info['hash']\n\n if hash_value not in results_dict:\n file_name = os.path.basename(info['_id'])\n results_dict[hash_value] = [file_name, 1]\n else:\n results_dict[hash_value][1] += 1\n\n count = list(results_dict.values())\n sorted_count = sorted(count, key=lambda x: x[1], reverse=True)\n \n with ImagesDB(IMG_INFO_DB_FILENAME) as imgDb: \n imgDb.insert_batch(sorted_count)", "def process(image):\n pass", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def multi_run_wrapper(args):\n\treturn img_preprocessing(*args)", "def classify_pics(pic_q):\n while not pic_q.empty():\n try:\n print (\"Starting a batch of threads...\")\n threads = []\n for i in range(MAX_THREADS):\n if not pic_q.empty():\n picTuple = pic_q.get()\n t = Thread(target=classify_pic,\n args=(picTuple[0], picTuple[1], pic_q))\n threads.append(t)\n t.start()\n\n for t in threads:\n t.join()\n\n finally:\n # write to DATA_FILE even when process is interrupted\n with open(DATA_FILE, 'w') as data_file:\n print (\"Rewriting %s with %s entries\" %\n (DATA_FILE, len(classify_data)))\n json.dump(classify_data, data_file)", "def main(save_dir, img_dir, df, fname_col):\n\tpool = mp.Pool(mp.cpu_count())\n\tresult = pool.map(multi_run_wrapper,[(save_dir, img_dir, \n\t\t\t\t\t\tfname) for fname in df[fname_col].values[0:4]])", "def _compute_ij_images_numpy_parallel(\n src_x_image: np.ndarray,\n src_y_image: np.ndarray,\n src_i_min: int,\n src_j_min: int,\n dst_src_ij_images: np.ndarray,\n dst_x_offset: float,\n dst_y_offset: float,\n dst_x_scale: float,\n dst_y_scale: float,\n uv_delta: float\n):\n src_height = src_x_image.shape[-2]\n dst_src_ij_images[:, :, :] = np.nan\n for src_j0 in nb.prange(src_height - 1):\n _compute_ij_images_for_source_line(\n src_j0,\n src_x_image,\n src_y_image,\n src_i_min,\n src_j_min,\n dst_src_ij_images,\n dst_x_offset,\n dst_y_offset,\n dst_x_scale,\n dst_y_scale,\n uv_delta\n )", "def run_skim(self):\n # Split input into chunks for processin\n skim_files = glob.glob(self.args.input + \"*.root\")\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n pool.imap(_run_skim, skim_files)\n # Close and join pool\n pool.close()\n pool.join()", "def read_images(imagedir, size, ncores=mp.cpu_count()):\n _f = functools.partial(_image_worker, size=size)\n with mp.Pool(ncores) as pool:\n ret = pool.map(_f, get_files(imagedir))\n return {k: v for k,v in ret if v is not None}", "def train_batch_create_mp(imagedirs, classes, indices, image_key, offset_percent, output_size, nprocesses):\r\n batch_size = len(indices)\r\n n_classes = len(classes)\r\n # now create the output class and pixel arrays\r\n output_array = np.zeros((batch_size, output_size[0], output_size[1], output_size[2]), dtype=np.float32)\r\n class_array = np.zeros((batch_size, n_classes), dtype=np.int8)\r\n batch_data = [image_key[i] for i in indices]\r\n whole_minibatch_size = batch_size // nprocesses\r\n num_whole_minibatches = batch_size // whole_minibatch_size\r\n input_list = []\r\n for i in range(num_whole_minibatches):\r\n 
input_list.append(batch_data[whole_minibatch_size*i:whole_minibatch_size*(1+i)])\r\n if batch_size % nprocesses != 0:\r\n input_list.append(batch_data[whole_minibatch_size*num_whole_minibatches:])\r\n frozen_params = (imagedirs, classes, offset_percent, output_size)\r\n partial_worker = partial(batch_worker, frozen_params=frozen_params)\r\n # initializes the pool of processes\r\n print('building pool')\r\n pool = multiprocessing.Pool(nprocesses)\r\n # maps partial_worker and list of stars to the pool, stores used parameters in a list\r\n print('mapping pool')\r\n outputs = pool.map(partial_worker, input_list)\r\n # end the list of functions to go to pool\r\n pool.close()\r\n print('pool closed')\r\n # wait for all processes to return\r\n pool.join()\r\n print('pool joined')\r\n counter = 0\r\n for i in range(len(outputs)):\r\n current_output = outputs[i]\r\n pixel_data = current_output[0]\r\n class_data = current_output[1]\r\n num_fish = len(pixel_data)\r\n for lf in range(num_fish):\r\n output_array[counter, :, :, :] = np.reshape(pixel_data[lf], output_size)\r\n class_array[counter, :] = class_data[lf]\r\n counter += 1\r\n return output_array, class_array", "def _iter_images(self):\n raise NotImplementedError", "def process_images(image, label):\n # Normalize images to have a mean of 0 and standard deviation of 1\n # per_image_standardization is preferred, which normalize the entire image to mean zero and std 1.\n # It also make learning fast.\n image = tf.image.per_image_standardization(image)\n # Resize images from 32x32 to 277x277\n image = tf.image.resize(image, (227,227))\n return image, label", "def process(self, image):", "def run(self, images):\n\n # Apply filtering\n if len(self.preprocessing) > 0: \n print('Applying', len(self.preprocessing), 'filter(s) to input images')\n for filter in self.preprocessing:\n for i in range(len(images)):\n images[i] = filter(images[i])\n\n # Apply feature extraction\n if len(self.features) > 0:\n print('Extracting', len(self.features), 'feature(s) from input images')\n scaler = MinMaxScaler(feature_range=(0, 1))\n for i in range(len(images)):\n features = []\n for feature in self.features:\n features.append(feature(images[i]))\n images[i] = np.hstack(features)\n images = scaler.fit_transform(images)\n else:\n # Flatten images (not necessary when using feature extraction)\n train_data = np.array(train_data).reshape((len(train_data), -1))\n\n # Run predictions\n print('Predicting presence of parasites in', len(images), 'images\\n')\n return self.classifier.predict(images)", "def download_images(pages):\n try:\n pool = Pool(conf.MAX_PROCESS)\n pool.map_async(get_image_from_page, pages)\n pool.close()\n pool.join()\n except:\n pool.close()\n pool.join()", "def do_image_operations(self):\n def inner(future):\n self.done_callback()\n\n self.context.thread_pool.queue(\n operation=self.img_operation_worker,\n callback=inner\n )", "def batch_worker(minibatch_info, frozen_params):\r\n imagedirs = frozen_params[0]\r\n classes = frozen_params[1]\r\n offset_percent = frozen_params[2]\r\n output_size = frozen_params[3]\r\n nclass = len(classes)\r\n nfish = len(minibatch_info)\r\n class_onehot = np.zeros((nfish, nclass), dtype=np.int8)\r\n imdata = np.zeros((nfish, int(np.prod(output_size))))\r\n for i in range(nfish):\r\n current_fishtuple = minibatch_info[i]\r\n fish_type = current_fishtuple[-1]\r\n fish_type = fish_type.strip(\"'\")\r\n fish_directory = imagedirs[fish_type]\r\n imdata[i, :] = read_single_image(current_fishtuple, fish_directory, 
offset_percent, output_size)\r\n if fish_type != 'NoF':\r\n fish_type = 'FISH'\r\n fish_class = int(classes[fish_type])\r\n class_onehot[i, fish_class] = 1\r\n return imdata, class_onehot", "def extract(directory):\n global usersDict\n images = []\n\n for (dirpath, dirnames, filenames) in walk(directory):\n if not filenames:\n continue\n for file in filenames:\n img = Image(dirpath, file)\n images.append(img)\n # This will utilized all cores, good for single machine / VM, it is not a distributed solution\n pool = Pool(4, initializer, ())\n\n pool.map(model_processing, images)\n\n print('FINISHHH----', usersDict)\n for user in usersDict:\n print('DICTTT----', user.images)\n user.save()", "def process_images(images, cam, params):\n print cam, params\n groups = groupby(images, \"EXPTIME\")\n for time, ims in groups.items():\n func = {\"sbc\": make_sbc_flat_name, \"sky\": make_sky_flat_name}[cam]\n out = func(time, params)\n out = os.path.join(FLATPATH, out)\n print time, len(ims), out\n make_flat_avg(ims, out)", "def run_map(self):\n # Split input into chunks for processing\n files = self.split_list()\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n self.output = pool.map(_run, files)\n # Close and join pool\n pool.close()\n pool.join()", "def process_images(self):\n self.processed_content_image = tf.keras.applications.vgg19.preprocess_input(\n self.content_image)\n self.processed_style_image = tf.keras.applications.vgg19.preprocess_input(\n self.style_image)", "def scale_all_images(image_dir, ratio):\n pool = Pool(1)\n pool.starmap(scale_image, zip(\n image_dir, itertools.repeat(ratio)))\n pool.close()\n pool.join()", "def main():\n # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer\n # compression time. If read raw images during training, use 0 for faster IO speed.\n\n # create output folders if they don't already exist\n for dir in [save_folder, save_mask_folder,save_hist_plot_folder]:\n if dir != None:\n if not os.path.exists(dir):\n os.makedirs(dir)\n print('mkdir [{:s}] ...'.format(dir))\n\n else:\n # print('Folder [{:s}] already exists. 
Exit...'.format(save_folder))\n # sys.exit(1)\n pass # uncomment above two lines for ease of working, if necessary\n\n img_list = []\n for root, dirsfoo, file_list in sorted(os.walk(input_folder)): # +'/*SR.tif'\n for x in file_list: # assume only images in the input_folder\n if x.endswith(\"SR.tif\"):\n path = os.path.join(root, x) \n img_list.append(path)\n break\n # img_list = ['/data_dir/Scenes/20190619_191648_25_106f_3B_AnalyticMS_SR.tif'] # for testing\n def update(arg):\n pbar.update(arg)\n # img_list=img_list[238:270] # for testing\n pbar = ProgressBar(len(img_list))\n pool = Pool(n_thread) # (n_thread)\n for path in img_list:\n if input_mask_folder==None:\n path_mask=None\n else:\n path_mask=name_lookup(path) # lookup mask path\n pool.apply_async(worker,\n args=(path, save_folder, crop_sz, step, thres_sz, compression_level, path_mask, save_mask_folder),\n callback=update)\n pool.close()\n pool.join()\n print('All subprocesses done.')", "def multiprocess_filtered_images_to_tiles(display=False, save_summary=True, save_data=True, save_top_tiles=True,\n html=False, image_list=None):\n timer = Time()\n print(\"Generating tile summaries (multiprocess)\\n\")\n\n if save_summary and not os.path.exists(slide.TILE_SUMMARY_DIR):\n os.makedirs(slide.TILE_SUMMARY_DIR)\n\n # how many processes to use\n num_processes = min(multiprocessing.cpu_count(),5)#multiprocessing.cpu_count()\n pool = multiprocessing.Pool(num_processes)\n\n if image_list is not None:\n num_train_images = len(image_list)\n\n if num_processes > num_train_images:\n num_processes = num_train_images\n images_per_process = num_train_images / num_processes\n\n print(\"Number of processes: \" + str(num_processes))\n print(\"Number of training images: \" + str(num_train_images))\n\n tasks = []\n for num_process in range(1, num_processes + 1):\n start_index = (num_process - 1) * images_per_process + 1\n end_index = num_process * images_per_process\n start_index = int(start_index)\n end_index = int(end_index)\n if image_list is not None:\n sublist = image_list[start_index - 1:end_index]\n tasks.append((sublist, display, save_summary, save_data, save_top_tiles))\n print(\"Task #\" + str(num_process) + \": Process slides \" + str(sublist))\n else:\n tasks.append((start_index, end_index, display, save_summary, save_data, save_top_tiles))\n if start_index == end_index:\n print(\"Task #\" + str(num_process) + \": Process slide \" + str(start_index))\n else:\n print(\"Task #\" + str(num_process) + \": Process slides \" + str(start_index) + \" to \" + str(end_index))\n\n # start tasks\n results = []\n for t in tasks:\n if image_list is not None:\n results.append(pool.apply_async(image_list_to_tiles, t))\n else:\n results.append(pool.apply_async(image_range_to_tiles, t))\n\n slide_names = list()\n tile_summaries_dict = dict()\n for result in results:\n image_nums, tile_summaries = result.get()\n slide_names.extend(image_nums)\n tile_summaries_dict.update(tile_summaries)\n print(\"Done tiling slides: %s\" % image_nums)\n\n if html:\n generate_tiled_html_result(slide_names, tile_summaries_dict, save_data)\n\n print(\"Time to generate tile previews (multiprocess): %s\\n\" % str(timer.elapsed()))", "def image_processor(self, img_arr):\n assert img_arr.dtype == np.uint8, \\\n f\"image_processor requires uint8 array but not {img_arr.dtype}\"\n img_arr = self.transformation.run(img_arr)\n if self.is_train:\n img_arr = self.augmentation.run(img_arr)\n img_arr = self.post_transformation.run(img_arr)\n\n return img_arr", "def run_calculation():\n\n 
print(\"Creating %d-process pool\" % mp.cpu_count())\n\n pool = mp.Pool(mp.cpu_count())\n\n f = h5py.File('/testdata/mandelbrot.hdf5', 'w')\n\n print(\"Creating output dataset with shape %s x %s\" % (NX, NY))\n\n dset = f.create_dataset('mandelbrot', (NX, NY), 'i')\n dset.attrs['XSTART'] = XSTART\n dset.attrs['YSTART'] = YSTART\n dset.attrs['XEXTENT'] = XEXTENT\n dset.attrs['YEXTENT'] = YEXTENT\n\n result = pool.imap(compute_row, (x * xincr for x in range(NX)))\n\n for idx, arr in enumerate(result):\n if idx % 25 == 0: print(\"Recording row %s\" % idx)\n dset[idx] = arr\n\n print(\"Closing HDF5 file\")\n\n f.close()\n\n print(\"Shutting down process pool\")\n\n pool.close()\n pool.join()", "def process_image(self, **kwargs):\n try:\n img = self.current_image\n\n if self.is_vis:\n result = self._process_job_vis(img, **kwargs)\n elif self.is_nir:\n result = self._process_job_nir(img, **kwargs)\n elif self.is_fluo:\n result = self._process_job_fluo(img, **kwargs)\n else:\n raise NotImplementedError\n\n except Exception as e:\n print(\n 'Failed to process image: \"{}\", because \"{}\"'.format(\n self.file_path, repr(e)\n )\n )\n self.print_images()\n return False\n\n self.print_images()\n\n return result", "def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def flow(self, batch_size=32):\n nb_batches = int(len(self.image_ids_in_subset) / batch_size) + 1\n while True:\n # Before each epoch we shuffle the images' ids\n random.shuffle(self.image_ids_in_subset)\n\n for i in range(nb_batches):\n # We first get all the image ids for the next batch\n current_bach = self.image_ids_in_subset[i*batch_size:(i+1)*batch_size]\n X_batch = []\n Y_batch = []\n\n for image_id in current_bach:\n # Load the image and resize it. We get a PIL Image object\n img = image.load_img(self.get_img_path(int(image_id)), grayscale=False, target_size=(cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE))\n # Cast the Image object to a numpy array and put the channel has the last dimension\n img_arr = image.img_to_array(img, data_format='channels_last')\n X_batch.append(img_arr)\n # Y_batch.append(self.id_to_label[image_id])\n Y_batch.append(self.get_labels(image_id))\n\n # resize X_batch in (batch_size, IMG_HEIGHT, IMG_WIDTH, 3)\n X_batch = np.reshape(X_batch, (-1, cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE, 3))\n # resize Y_batch in (None, nb_classes)\n Y_batch = np.reshape(Y_batch, (-1, self.nb_classes))\n\n # substract mean values from imagenet\n X_batch = preprocess_input(X_batch, data_format='channels_last')\n yield(X_batch, Y_batch)", "def _process_image_files_batch(coder, thread_index, ranges, name, filenames,\n labels, num_shards, output_dir):\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n filename = filenames[i]\n label = labels[i]\n\n try:\n image_buffer, height, width = _process_image(filename, coder)\n except Exception as e:\n print(e)\n print('SKIPPED: Unexpected error while decoding %s.' % filename)\n continue\n\n example = _convert_to_example(filename, image_buffer, label,\n height, width)\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print('%s [thread %d]: Wrote %d images to %s' %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print('%s [thread %d]: Wrote %d images to %d shards.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()", "def run(images):\n sc = SparkContext()\n rdd = sc.parallelize(images, 16) \\\n .map(truncate).repartition(16)\n rdd = generate_Y_cb_cr_matrices(rdd)\n rdd = generate_sub_blocks(rdd)\n rdd = apply_transformations(rdd)\n rdd = combine_sub_blocks(rdd)\n\n ### BEGIN SOLUTION HERE ###\n # Add any other necessary functions you would like to perform on the rdd here\n # Feel free to write as many helper functions as necessary\n return rdd", "def process(self):\n if self.images == None:\n return None\n\n result = self.tf_session.run(\n self.output_tensor,\n feed_dict={self.input_tensor: np.array(self.images)})\n self.images = None\n return result", "def read_processed_images(mode, image_type):\n raw_data = read_image_data(mode, image_type)\n labels = read_label_data(mode, image_type)\n features = np.apply_along_axis(extract_features, 1, raw_data)\n return ProcessedImageData(features, labels, np.arange(len(features)))", "def process_image(self):\n pass", "def split_preprocess_jobs(preprocess_images_job, input_images, prefix):\n resized_images = []\n\n for i in range(len(input_images)):\n curr = i % len(preprocess_images_job)\n preprocess_images_job[curr].add_inputs(input_images[i])\n out_file = File(prefix + str(input_images[i]))\n preprocess_images_job[curr].add_outputs(out_file)\n resized_images.append(out_file)\n \n return resized_images", "def process_image(self):\n\n detect.main(self.nn_args)", "def process(\n self,\n image: np.array\n ) -> np.array:\n pass", "def read_images(fs, img_path_batch, mode=\"rb\"):\n result = []\n logging.info(\"Start to read images at {}\".format(socket.gethostname()))\n for (label, img_path) in img_path_batch:\n img = read_image(fs, img_path, mode)\n result.append((label, img))\n logging.info(\"Finish the reading of {} images on {}\".format(\n len(result), socket.gethostname()))\n return result", "def batch(img_path, gt_path,img_list, batch, total_size, label_list):\r\n\r\n image_list = [os.path.join(img_path, i) for i in img_list]\r\n gt_list = [os.path.join(gt_path,i) for i in img_list]\r\n\r\n \r\n for i in range(0, total_size, batch):\r\n yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)", "def process_image():\n global last_frame, is_streaming\n i=0\n\n imgproc = 
ImgProc()\n while(True):\n if last_frame is not None and is_streaming:\n time.sleep(0.1)\n\n print(\"Processing frame \", i)\n imgproc.detect_object(last_frame, i)\n print(\"Processing complete \", i)\n i+=1", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. 
Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")", "def _compute_ij_images_numpy_sequential(\n src_x_image: np.ndarray,\n src_y_image: np.ndarray,\n src_i_min: int,\n src_j_min: int,\n dst_src_ij_images: np.ndarray,\n dst_x_offset: float,\n dst_y_offset: float,\n dst_x_scale: float,\n dst_y_scale: float,\n uv_delta: float\n):\n src_height = src_x_image.shape[-2]\n dst_src_ij_images[:, :, :] = np.nan\n for src_j0 in range(src_height - 1):\n _compute_ij_images_for_source_line(\n src_j0,\n src_x_image,\n src_y_image,\n src_i_min,\n src_j_min,\n dst_src_ij_images,\n dst_x_offset,\n dst_y_offset,\n dst_x_scale,\n dst_y_scale,\n uv_delta\n )", "def compute(self,n_speed,s_speed,msize,thershold):\n X=self.image\n for layer in self.CNN:\n #layer dictonary {type,kernel,bias,hparams}\n #offloading decisions\n kernel=layer[\"kernel\"]\n hparam=layer[\"hparams\"]\n if layer[l_type]==\"conv\":\n off_dec=offload(n_speed,s_speed,msize,X,kernel,hparam)\n if(off_dec.checkOffload(thershold)):\n #get the result form the server\n conv_dict={ \"data\":X,\"l_type\":layer[l_type],\"hpara\":hparam,\"pos\":0}\n c=client(conv_dict,self.edge[\"ip\"],self.edge[\"port\"])\n c.send()\n X=c.receive_array()\n \n else:\n X=self.thread_Compute(X,layer)\n\n else:\n X=self.thread_Compute(X,layer)", "def ImagePipeline(self,cnn_pipe = False, batch_index = None):\n\t\t\n\t\tif self.verbose:\n\t\t\tprint \"...createFolderStructure\"\n\n\t\tself.createFolderStructure()\n\n\t\tif self.verbose:\n\t\t\tprint \"...downloadImages\"\n\n\t\tself.downloadImages()\n\n\t\tif self.verbose:\n\t\t\tprint \"...binarize_classes\"\n\n\t\tclasses, lb = self.binarize_classes()\n\n\t\tif self.verbose:\n\t\t\tprint \"...load_paths_and_labels\"\n\n\t\tim_paths, im_labels = self.load_paths_and_labels(classes)\n\n\t\tif self.verbose:\n\t\t\tprint \"...load_images\"\n\t\t\t\t\n\t\t# Uncomment this if you just want to use one cpu\t\t\n\t\t#imlist = self.load_images(im_paths,cnn_pipe)\n\t\t#self.load_images(im_paths,[],[])\n\t\t#imlist = self.imlist\n\n\t\timlist, self.im_index = self.load_images_parallel(im_paths)\n\t\t#print len(imlist)\n\n\t\t# Sort the list by index so we don't have to do as many iteration in finding similar\n\t\t#if not cnn_pipe:\n\t\tzipped = zip(self.im_index, imlist)\n\t\tzipped_sorted = sorted(zipped, key=lambda x: x[0])\n\t\tself.im_index , imlist = zip(*zipped_sorted)\n\n\t\taverage_image = None\n\t\tif cnn_pipe:\n\t\t\tif self.verbose:\n\t\t\t\tprint \"...calculate_average_image\"\n\t\t\taverage_image = self.calculate_average_image(imlist)\n\n\t\tif self.verbose:\n\t\t\tprint 
\"\\n...data_augmentation_and_vectorization\\n\"\n\t\t\n\t\t#print imlist\n\n\t\tX,Y = self.data_augmentation_and_vectorization(imlist,lb,im_labels,average_image)\n\n\t\toutput = open( self.data_path + 'im_index.pkl', 'wb')\n\t\tcPickle.dump(self.im_index, output,protocol=-1)\n\t\toutput.close()\n\n\t\tif self.verbose:\n\t\t\tprint \"...dimReductionSdA\"\n\t\t\n\t\tX = self.dimReductionSdA(X)\n\t\t# print X[0][0:3]\n\t\t# print X[1][0:3]\n\t\t#X = self.dimReduction(X)\n\n\t\toutput = open( self.data_path + 'X_compressed_'+str(batch_index)+'.pkl', 'wb')\n\t\tcPickle.dump(X, output,protocol=-1)\n\t\toutput.close()\n\n\t\toutput = open( self.data_path + 'im_index_' + str(batch_index) + '.pkl', 'wb')\n\t\tcPickle.dump(self.im_index, output,protocol=-1)\n\t\toutput.close()\n\n\t\tif cnn_pipe:\n\t\t\tif self.verbose:\n\t\t\t\tprint \"\\n...create_train_validate_test_sets\\n\"\n\t\t\ttrain_set,valid_set,test_set = self.create_train_validate_test_sets(X, Y)\n\t\t\treturn train_set,valid_set,test_set\n\t\telse:\n\n\t\t\tif self.verbose:\n\t\t\t\tprint \"\\n...similarImages\\n\"\n\n\t\t\tdf, duplicated_images = self.similarImages(X)\n\t\t\n\t\t\treturn df,duplicated_images", "def main(self, path_4a_cobertura, move_img_bool, process_with_thread):\n all_rapideye = open(FILE_ALL_RAPIDEYE, 'r')\n imgs = all_rapideye.readlines()\n\n if process_with_thread:\n\n PrepareThreads.perform(\n imgs, self.__prepare_process_many_imgs, path_4a_cobertura,\n move_img_bool\n )\n\n else:\n for img in imgs:\n self.__prepare_process_many_imgs(\n path_4a_cobertura, img, move_img_bool, 'foot_1'\n )", "def _process_image_paths(\n self, image_paths: Iterable[str], *, use_cache: bool = True\n ) -> Iterator[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]:\n assert self.produce_featurized_images, (\n \"For _process_image_paths() to work, we need either a feature cache, or an image loader, \"\n \"an image featurizer, and a region detector.\"\n )\n\n batch: List[Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]] = []\n unprocessed_paths: Set[str] = set()\n\n def yield_batch():\n # process the images\n paths = list(unprocessed_paths)\n images, sizes = self.image_loader(paths)\n with torch.no_grad():\n images = images.to(self.cuda_device)\n sizes = sizes.to(self.cuda_device)\n featurized_images = self.image_featurizer(images, sizes)\n detector_results = self.region_detector(images, sizes, featurized_images)\n features = detector_results.features\n coordinates = detector_results.boxes\n class_probs = detector_results.class_probs\n class_labels = detector_results.class_labels\n\n # store the processed results in memory, so we can complete the batch\n paths_to_tensors = {}\n for i, path in enumerate(paths):\n if class_probs:\n class_probs_tensor = class_probs[i]\n else:\n class_probs_tensor = None\n\n if class_labels:\n class_labels_tensor = class_labels[i]\n else:\n class_labels_tensor = None\n\n paths_to_tensors[path] = (\n features[i],\n coordinates[i],\n class_probs_tensor,\n class_labels_tensor,\n )\n\n # store the processed results in the cache\n if use_cache and self.write_to_cache:\n for path, (\n features,\n coordinates,\n class_probs,\n class_labels,\n ) in paths_to_tensors.items():\n basename = os.path.basename(path)\n self._feature_cache[basename] = features\n self._coordinates_cache[basename] = coordinates\n if class_probs is not None:\n self._class_probs_cache[basename] = class_probs\n if class_labels is not None:\n self._class_labels_cache[basename] = class_labels\n\n # yield the batch\n 
for b in batch:\n if isinstance(b, str):\n yield paths_to_tensors[b]\n else:\n yield b\n\n for image_path in image_paths:\n basename = os.path.basename(image_path)\n try:\n if use_cache:\n features: Tensor = self._feature_cache[basename]\n coordinates: Tensor = self._coordinates_cache[basename]\n class_probs: Optional[Tensor] = self._class_probs_cache.get(basename)\n class_labels: Optional[Tensor] = self._class_labels_cache.get(basename)\n if len(batch) <= 0:\n yield features, coordinates, class_probs, class_labels\n else:\n batch.append((features, coordinates, class_probs, class_labels))\n else:\n # If we're not using the cache, we pretend we had a cache miss here.\n raise KeyError\n except KeyError:\n if not (self.image_loader and self.region_detector and self.image_featurizer):\n if use_cache:\n raise KeyError(\n f\"Could not find {basename} in the feature cache, and \"\n \"image featurizers are not defined.\"\n )\n else:\n raise KeyError(\n \"Reading the feature cache is disabled, and image featurizers \"\n \"are not defined. I can't process anything.\"\n )\n batch.append(image_path)\n unprocessed_paths.add(image_path)\n if len(unprocessed_paths) >= self.image_processing_batch_size:\n yield from yield_batch()\n batch = []\n unprocessed_paths = set()\n\n if len(batch) > 0:\n yield from yield_batch()", "def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n 
image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix", "def batch_process(minID, maxID, side='blue', **kwargs):\r\n\r\n if side == 'both':\r\n sides = ['blue','red']\r\n else:\r\n sides = [side]\r\n for side in sides:\r\n for i in range(minID, maxID+1, 1):\r\n filename = '%s%04d.fits' % (side, i)\r\n if os.path.exists(filename):\r\n try:\r\n extract1D(i, side=side, **kwargs)\r\n except iraf.IrafError:\r\n # some errors just require you to try again...\r\n print 'Hit error, retrying...'\r\n extract1D(i, side=side, **kwargs)", "def run_images_analysis(filepath, ID, method):\n for path in filepath:\n try:\n Image.open(path)\n except IOError:\n msg = 'Please import images files, or just a single zip archive'\n else:\n filename, extension = get_file_name(path)\n\n # Save raw image to database\n msg = client.upload_file(ID, filename, extension, path)\n\n err, msg = check_msg(msg)\n\n if err is False: # if no error in uploading image\n # Request to process image\n client.process_image(ID, filename, method)\n return msg", "def test_compare_serial_with_multiprocess(sidesweep_image_sequence):\n\n cc = Cwsim_container_from_ims(ims=sidesweep_image_sequence)\n\n serial_times = []\n for idx, im in enumerate(sidesweep_image_sequence):\n t1 = time()\n cc.query_image(im)\n t2 = time() - t1\n serial_times.append(t2)\n serial_mean = np.mean(serial_times)\n\n # prepare for multiprocess stuff\n cc.prepare_memory_bank_outside()\n test_im = sidesweep_image_sequence[1]\n cc.query_image_mp(test_im)\n multip_times = []\n for idx, im in enumerate(sidesweep_image_sequence):\n t1 = time()\n cc.query_image_mp(im)\n t2 = time() - t1\n multip_times.append(t2)\n multip_mean = np.mean(multip_times)\n print('Serial mean: {}, multip mean: {} - speedup = {}'.format(serial_mean, multip_mean,serial_mean / multip_mean))", "def main():\n nb_processed = 0\n for dataset_name in DATASETS:\n print(\"-----------------\")\n print(\"Dataset: '%s'\" % (dataset_name,))\n print(\"-----------------\")\n\n dataset_dir = os.path.join(WRITE_MAIN_DIR, dataset_name)\n if not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir)\n\n dataset = Dataset([os.path.join(READ_MAIN_DIR, dataset_name)])\n print(\"Found %d images total.\" % (len(dataset.fps),))\n\n errors = []\n\n scale_height, scale_width = SCALES[dataset_name]\n target_aspect_ratio = scale_width / scale_height\n\n # iterate over every image in the current dataset,\n # augment that image N times, add cols/rows until target aspect ratio\n # is reached, resize it (e.g. 
64x64), save it\n for img_idx, (image_filepath, image) in enumerate(zip(dataset.fps, dataset.get_images())):\n print(\"[%s] Image %d of %d (%.2f%%)...\" \\\n % (dataset_name, img_idx+1, len(dataset.fps),\n 100*(img_idx+1)/len(dataset.fps)))\n\n # IOErrors during loading of images result here in a None value\n if image is None:\n print(\"Error / None\")\n errors.append((\n image_filepath,\n \"Failed to load image '%s' (idx %d for dataset %s)\" \\\n % (image_filepath, img_idx, dataset_name)\n ))\n else:\n # resize too big images to smaller ones before any augmentation\n # (for performance reasons)\n height = image.shape[0]\n width = image.shape[1]\n aspect_ratio = width / height\n if width > 1000 or height > 1000:\n image = misc.imresize(image, (1000, int(1000 * aspect_ratio)))\n\n # augment image\n # converts augmented versions automatically to float32, 0-1\n augmentations = augment(image, **AUGMENTATIONS[dataset_name])\n\n # create list of original image + augmented versions\n images_aug = [image / 255.0]\n images_aug.extend(augmentations)\n\n # for each augmented version of the images:\n # resize it to target aspect ratio (e.g. same width and height),\n # save it\n for aug_idx, image_aug in enumerate(images_aug):\n image_aug = to_aspect_ratio_add(image_aug, target_aspect_ratio)\n filename = \"{:0>6}_{:0>3}.jpg\".format(img_idx, aug_idx)\n img_scaled = misc.imresize(image_aug, (scale_height, scale_width))\n misc.imsave(os.path.join(dataset_dir, filename), img_scaled)\n\n nb_processed += 1\n\n print(\"Processed %d images with %d errors.\" % (nb_processed, len(errors)))\n for (fp, err) in errors:\n print(\"File %s error:\" % (fp,))\n print(err)\n print(\"Finished.\")", "def preprocess_images(images_dir, image_dims, logger):\n find_str = images_dir + '/**/*.jpg'\n images = glob.glob(find_str, recursive=True)\n num_samples = get_num_samples(images_dir)\n\n # Load in the already processed file list\n proc_list_path = images_dir + '/processed_list.txt'\n if os.path.isfile(proc_list_path):\n with open(proc_list_path) as f:\n proc_list = f.read().split('\\n')\n else:\n proc_list = []\n \n i = 1\n for image in images:\n image_name = image.split('/')[-1]\n if image not in proc_list:\n logger.info(\"Processing %s\", \" {} - {}/{}\".format(\n image_name, i, num_samples))\n try:\n processed_image = ImageCheck.check_and_crop(image)\n except (ImageCheck.ObjectMissingError,\n ImageCheck.WormMissingError,\n ImageCheck.MultipleWormsError,\n ImageCheck.TooBlurryError) as e:\n logger.info(\"Processing Error: %s\",\n \"Image at: \\n{} \\n Produced error: {} \\n Removing\"\n \" image\".format(image, e))\n os.remove(image)\n i = i + 1\n continue\n cv2.imwrite(image, processed_image)\n with open(proc_list_path, 'a') as f:\n f.write(image + '\\n')\n else:\n logger.info(\"Skipping %s\", \" {} (already processed) - {}/{}\".format(\n image_name, i, num_samples))\n i = i + 1", "def _build_image_processing(self, shift_ratio=0):\n with tf.device(self.cpu_device):\n subset = 'train'\n image_producer_ops = []\n image_producer_stages = []\n images_splits, labels_splits = self.image_preprocessor.minibatch(\n self.dataset,\n subset=subset,\n use_datasets=self.params.use_datasets,\n cache_data=self.params.cache_data,\n shift_ratio=shift_ratio)\n images_shape = images_splits[0].get_shape()\n labels_shape = labels_splits[0].get_shape()\n for device_num in range(len(self.devices)):\n image_producer_stages.append(\n data_flow_ops.StagingArea(\n [images_splits[0].dtype, labels_splits[0].dtype],\n shapes=[images_shape, 
labels_shape]))\n return (image_producer_ops, image_producer_stages)", "def bulk_process_images(inputpath, outputpath, extension):\n\n for dirpath, dirnames, filenames in os.walk(inputpath):\n structure = os.path.join(outputpath, dirpath[len(inputpath) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dirpath, file)\n dest = os.path.join(structure, file)\n img = load_and_preprocess_image(src)\n cv2.imwrite(dest, img)", "def haiku_multiprocessing(paths, num_processes=2):\n with Pool(num_processes) as pool:\n results = pool.map(single_process, paths)\n return results", "def calculatePixelMetricsMP(input_img, input_df, num_workers=8):\n\n manager = Manager()\n new_cir = manager.list()\n q = Queue()\n for index, row in input_df.iterrows():\n plot = row['plot']\n x = row['x']\n y = row['y']\n r = row['r']\n weight = row['weight']\n info = [plot, x, y, r, weight]\n q.put(info)\n workers = Pool(num_workers, calculatePixelMetricsQueue,(q, input_img, input_df, new_cir))\n workers.close()\n workers.join()\n \n header = ['plot', 'x', 'y', 'r', 'weight', 'core', 'inner', 'outer']\n print(len(new_cir))\n output_df = pd.DataFrame(list(new_cir), columns=header)\n return output_df", "def applyToBatch(batch, operation, parallel_safe):\n logging.info(\"Applying operation to batch...\"); t0 = time()\n if operation is None:\n logging.info(\"Nothing to do\")\n return batch\n \n if parallel_safe:\n ret = ProcessPool().map(operation, batch)\n else:\n ret = list(map(operation, batch))\n \n t = time()-t0\n logging.info(f\"Finished applying to batch in {t}s, average time per image was {t/len(batch)}s\")\n return ret", "def process_images(self):\n source_images = self.get_build_images()\n self.log.info(\"Got %s images for publishing. Processing..\", len(source_images))\n\n for image in source_images:\n self.fetch_image(image)\n\n for target in image.push_registries:\n for tag in image.release_tags:\n repository = \"%s/%s\" % (target, image.repository.name)\n self.tag_image(image, repository, tag)\n retry_count = 1\n while retry_count <= self.retry_limit:\n self.log.info(\"Pushing %s:%s to %s (%d/%d)\", repository, tag, target, retry_count, self.retry_limit)\n try:\n self.publish_image(target, repository, tag)\n break\n except ImagePushError as e:\n self.log.error(\"%s\", e.message)\n retry_count = retry_count + 1\n else:\n return False\n return True", "def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results", "def combine_images(args):\n\n # Read all images into a cube (TODO: think about the RAM)\n with fits.open(args.input[0]) as im0:\n lx, ly = im0[0].data.shape\n ref_hdr = 
im0[0].header\n\n headers = [fits.open(im_name)[0].header for im_name in args.input]\n cube = numpy.ma.zeros((len(args.input), lx, ly))\n cube.mask = numpy.zeros_like(cube.data)\n for ii, im_name in enumerate(args.input):\n with astroim.Astroim(im_name) as im:\n cube.data[ii, :,:] = im.chips[0].data\n if im.chips[0].mask is not None:\n cube.mask[ii,:,:] = im.chips[0].mask\n\n # Scale images\n scale_functions = {\"median\": numpy.ma.median,\n \"mean\": numpy.ma.mean,\n \"mode\": scipy.stats.mstats.mode,\n \"none\": lambda x: 1}\n for ii, im_name in enumerate(args.input):\n func = scale_functions[args.scale.lower()]\n cube[ii,:,:] /= func(cube[ii,:,:])\n\n\n # Reproject all images to the ref_hdr\n for ii, _ in enumerate(args.input):\n if ii == 0:\n continue\n cube.data[ii,:,:], footprint = reproject_interp((cube.data[ii,:,:], headers[ii]), ref_hdr)\n cube.mask[ii,:,:], footprint = reproject_interp((cube.mask[ii,:,:], headers[ii]), ref_hdr)\n #whr = numpy.isnan(cube.data[ii,:,:])\n #cube.mask[ii,:,:][whr] = True\n\n # Do average\n average_functions = {\"median\": numpy.ma.median, \"mean\": numpy.ma.mean, \"sum\": numpy.ma.sum}\n func = average_functions[args.average.lower()]\n final_image = func(cube, axis=0)\n ref_hdr[\"NCOMBINE\"] = len(args.input)\n\n mask_name = utilities.replace_extension(args.output, \".fits.msk\")\n mask_name_header = utilities.replace_extension(os.path.basename(args.output), \".fits.msk\")\n ref_hdr[\"MASK\"] = mask_name_header\n fits.writeto(args.output, final_image.data, ref_hdr, clobber=True )\n fits.writeto(mask_name, numpy.array(final_image.mask, dtype=int), clobber=True)\n\n return args.output", "def run(self, images):\n\n if not (isinstance(images, Images)):\n raise Exception('Input data must be Images or a subclass')\n\n if len(images.dims.count) not in set([2, 3]):\n raise Exception('Number of image dimensions %s must be 2 or 3' % (len(images.dims.count)))\n\n self.isPrepared(images)\n\n # broadcast the reference\n bcReg = images.rdd.context.broadcast(self)\n\n def fitandtransform(im, reg):\n t = reg.value.getTransform(im)\n return t.apply(im)\n\n newrdd = images.rdd.mapValues(lambda im: fitandtransform(im, bcReg))\n\n return Images(newrdd).__finalize__(images)", "def load_images(image_types=None,\n directory=None,\n images_per_type=None,\n image_size=224,\n process=False,\n model=mobilenet_v2):\n\n images_numpy = []\n images_class = []\n\n for image_type in image_types:\n images_path = os.path.join(directory, image_type, '*.jpg')\n for i, filename in enumerate(glob.glob(images_path)):\n try:\n if i == images_per_type:\n break\n loaded_image = load_img(filename, target_size=(image_size, image_size))\n images_numpy.append(img_to_array(loaded_image))\n images_class.append(image_type)\n except Exception as e:\n print('TypeError: {}'.format(e))\n\n if process:\n image_batch = np.expand_dims(images_numpy, axis=0)\n images_processed = model.preprocess_input(image_batch.copy())\n images_class_processed = process_images_class(images_class)\n\n return images_processed[0], images_class_processed\n\n else:\n return images_numpy, images_class", "def process_image(image):\n # Open the image using PIL\n pil_image = Image.open(image)\n \n # Resize the image to 256x256 while maintining aspect ratio\n if pil_image.width > pil_image.height:\n resize_dim = (int(pil_image.width*256 / pil_image.height), 256)\n else:\n resize_dim = (256, int(pil_image.height*256 / pil_image.width))\n \n pil_image = pil_image.resize(resize_dim)\n \n # Crop image to center 224 pixles\n crop_box_dim 
= 224\n left = (pil_image.width - crop_box_dim)/2\n top = (pil_image.height - crop_box_dim)/2\n right = pil_image.width - (pil_image.width - crop_box_dim)/2\n bottom = pil_image.height - (pil_image.height - crop_box_dim)/2\n pil_image = pil_image.crop((left, top, right, bottom))\n \n # Update color channels\n np_image = np.array(pil_image)\n np_image_means = np.array([0.485, 0.456, 0.406])\n np_image_stddev = np.array([0.229, 0.224, 0.225])\n np_image = (np_image/255 - np_image_means) / np_image_stddev\n \n # PIL images and numpy arrays have color channels in the 3rd dimension\n # Transpose them to first dimension to match what PyTorch expects\n np_image = np_image.transpose((2,0,1))\n\n return np_image", "def main(params):\n mpi_vs_multiprocess_logging(\"process\", params)\n\n ifg_paths = []\n for ifg_path in params[cf.INTERFEROGRAM_FILES]:\n ifg_paths.append(ifg_path.sampled_path)\n\n rows, cols = params[\"rows\"], params[\"cols\"]\n\n return process_ifgs(ifg_paths, params, rows, cols)", "def run(self):\n\n im = None\n while im == None:\n im = self.vid_mem_reader.get_latest_image()\n if im == None:\n print \"not receiving images yet...\"\n time.sleep(0.2)\n\n #Wait for video source to be ready:\n #TODO: Shoud use vidmemreader, but this one never seem to return a resolution (at time of writing):\n #res = self.vid_mem_reader.get_resolution()\n \n #TODO: This should work, but it doesn't because OpenCV keeps on complaining about that im is not a IPL image \n #(while if you print it, it seems to be a IPL image).\n #print im\n size = cv.GetSize(im[0])\n #print size\n self.res = ({'width':size[0], 'height':size[1]})\n res = self.res\n\n self.transformer = util.speed_angle.SpeedAngle(None, res['width'], res['height'])\n \n while True:\n self.__ticker.tick()\n start_time = time.time()\n img = self.get_new_image()\n ''' Parallel Process Inside this module\n \n im = np.asarray(img[:,:])\n time_spent = time.time() - start_time\n \n #Parallel process\n \n self.parallel_rotate_image(im)\n self.logger.debug(\"Set one finished\")\n \n print \"Image Length: \", self.rotatedImages\n for img in self.rotatedImages:\n self.get_faces(img[0])\n self.update()\n \n self.rotatedImages = []\n '''\n im = np.asarray(img[:,:])\n \n image = self.rotate_image( im, [self.rotation])\n self.get_faces(image)\n self.update()\n\n #TODO: To be removed and or configurable:\n directory = \"/tmp/emergency/\"\n if not os.path.exists(directory):\n os.makedirs(directory) \n try:\n cv.SaveImage(directory + \"image.png\", image)\n except:\n print \"ERROR: Could not write image to /tmp/emergency/\"", "def _process_image_files_batch(thread_index, ranges, name, cnts, roots, num_shards):\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n output_file = os.path.join(FLAGS.output_directory, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n if i < cnts[0]:\n subset_idx = 0\n filename = ('%06d' % i) + filename_suffix\n else:\n subset_idx = 1\n filename = ('%06d' % (i - cnts[0])) + filename_suffix\n\n try:\n if name == 'test':\n _left_image, _right_image = _process_image(filename, subset_idx, name=='test')\n else:\n _left_image, _right_image, _disparity, _mask = _process_image(filename, subset_idx, name=='test')\n except Exception as e:\n print(e)\n print('SKIPPED: Unexpected eror while decoding %s, %s, %s.' % (filename, subset_idx, name))\n print(_left_image.shape, _right_image.shape, _disparity.shape, _mask.shape)\n continue\n\n if name == 'test':\n example = _convert_to_example(filename, subset_idx, _left_image, _right_image)\n else:\n example = _convert_to_example(filename, subset_idx, _left_image, _right_image, _disparity, _mask)\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print('%s [thread %d]: Wrote %d images to %s' %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print('%s [thread %d]: Wrote %d images to %d shards.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()", "def resize_all_images(image_dir, width, height, resize_type):\n if width == \"\" and height == \"\":\n return 0\n print(\"Enter resizing image.\")\n print(\"Enter resizing.\", width)\n pool = Pool(1)\n pool.starmap(resize_image, zip(\n image_dir, itertools.repeat(width), itertools.repeat(height), itertools.repeat(resize_type)))\n pool.close()\n pool.join()", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def mpirun_pipeline(image=\"uber/horovod:0.13.11-tf1.10.0-torch0.4.0-py3.5\",\n\t\t\t\t\t\t batch_size=\"64\",\n\t\t\t\t\t\t optimizer='momentum',\n sync_source='https://github.com/tensorflow/benchmarks.git',\n git_sync_branch='cnn_tf_v1.9_compatible',\n 
data='user-susan:/training',\n gpus=1,\n workers=1,\n cpu_limit='2',\n metric='images/sec',\n memory_limit='10Gi'):\n\n env = ['NCCL_DEBUG=INFO','GIT_SYNC_BRANCH={0}'.format(git_sync_branch)]\n\n train=arena.mpi_job_op(\n \tname=\"all-reduce\",\n \timage=image,\n \tenv=env,\n data=[data],\n workers=workers,\n sync_source=sync_source,\n gpus=gpus,\n cpu_limit=cpu_limit,\n memory_limit=memory_limit,\n metrics=[metric],\n \tcommand=\"\"\"\n \tmpirun python code/benchmarks/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py --model resnet101 \\\n \t--batch_size {0} --variable_update horovod --optimizer {1}\\\n \t--summary_verbosity=3 --save_summaries_steps=10\n \t\"\"\".format(batch_size, optimizer)\n )", "def main() -> co.Parallel:\n actors = [\"Oprah Winfrey\", \"Kate Mara\", \"Don Cheadle\", \"Dwayne Johnson\"]\n root = co.Parallel(image=_get_image())\n for actor in actors:\n root[actor] = co.Lazy(\n f\"python pipeline.py all_by_actor '{actor}'\"\n )\n return root", "def multi(video, processes):\n if processes < 0:\n processes = cpu_count() + processes\n elif processes == 0:\n raise ValueError('Number of processes must not be zero.')\n\n points = video.points\n points_split = tools.split_points(points, processes=processes)\n \n idi_kwargs = {\n 'cih_file': video.cih_file,\n }\n \n method_kwargs = {\n 'roi_size': video.method.roi_size, \n 'pad': video.method.pad, \n 'max_nfev': video.method.max_nfev, \n 'tol': video.method.tol, \n 'verbose': video.method.verbose, \n 'show_pbar': video.method.show_pbar\n }\n \n pool = Pool(processes=processes)\n results = [pool.apply_async(worker, args=(p, idi_kwargs, method_kwargs)) for p in points_split]\n pool.close()\n pool.join()\n\n out = []\n for r in results:\n _r = r.get()\n for i in _r:\n out.append(i)\n \n return np.asarray(out)", "def compute_img(self):\r\n self.load_img()\r\n self.check_shape()\r\n self.convert_img()\r\n self.img_computed = True", "def compute_parallel(self, inputs, communicator):\n self.compute_sequential([inputs], [communicator])", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)", "def all_images():\n\n total = set()\n jobs = [nomad.parse(get_job(job.template)) for job in config.enabled_jobs]\n for spec in jobs:\n for image in nomad.get_images(spec):\n if image is not None and image != 'None':\n total |= set([image])\n return total", "def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass", "def run(self,image, label, featureClasses, settings, enabledImageTypes,csvFile):\n print('Processing started')\n import time\n startTime = 
time.time()\n # grayscaleImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(imageNode.GetName()))\n grayscaleImage = sitk.ReadImage(image)\n #sitkUtils.PushToSlicer(label, labelNode.GetName(), overwrite=True, compositeView=2)\n labelsDict = {}\n if label:\n print(\"label={}\".format(label))\n labelsDict = self.prepareLabelsFromLabelmap(label, grayscaleImage, labelsDict)\n # if segmentationNode:\n # labelsDict = self.prepareLabelsFromSegmentation(segmentationNode, grayscaleImage, labelsDict)\n\n #self.featureValues = extractor.execute(grayscaleImage, labelImage, images, **kwargs)\n featuresDict = {}\n for l in labelsDict.keys():\n print(\"Calculating features for \"+l)\n try:\n featuresDict[l] = self.calculateFeatures(grayscaleImage,\n labelsDict[l],\n featureClasses,\n settings,\n enabledImageTypes)\n except:\n print('calculateFeatures() failed')\n traceback.print_exc()\n self.saveFeatures2CSVFile(featuresDict,csvFile)\n print(\"Completed\")\n endtime = time.time()\n print(\"totalTime={}\".format(endtime-startTime))\n # return featuresDict", "def general_image_folder(opt):\n #### configurations\n read_all_imgs = False # whether real all images to memory with multiprocessing\n # Set False for use limited memory\n BATCH = 5000 # After BATCH images, lmdb commits, if read_all_imgs = False\n n_thread = 40\n ########################################################\n img_folder = opt['img_folder']\n lmdb_save_path = opt['lmdb_save_path']\n meta_info = {'name': opt['name']}\n if not lmdb_save_path.endswith('.lmdb'):\n raise ValueError(\"lmdb_save_path must end with \\'lmdb\\'.\")\n if osp.exists(lmdb_save_path):\n print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))\n sys.exit(1)\n\n #### read all the image paths to a list\n print('Reading image path list ...')\n all_img_list = sorted(glob.glob(osp.join(img_folder, '*')))\n keys = []\n for img_path in all_img_list:\n keys.append(osp.splitext(osp.basename(img_path))[0])\n\n if read_all_imgs:\n #### read all images to memory (multiprocessing)\n dataset = {} # store all image data. 
list cannot keep the order, use dict\n print('Read images with multiprocessing, #thread: {} ...'.format(n_thread))\n pbar = util.ProgressBar(len(all_img_list))\n\n def mycallback(arg):\n '''get the image data and update pbar'''\n key = arg[0]\n dataset[key] = arg[1]\n pbar.update('Reading {}'.format(key))\n\n pool = Pool(n_thread)\n for path, key in zip(all_img_list, keys):\n pool.apply_async(read_image_worker, args=(path, key), callback=mycallback)\n pool.close()\n pool.join()\n print('Finish reading {} images.\\nWrite lmdb...'.format(len(all_img_list)))\n\n #### create lmdb environment\n data_size_per_img = cv2.imread(all_img_list[0], cv2.IMREAD_UNCHANGED).nbytes\n print('data size per image is: ', data_size_per_img)\n data_size = data_size_per_img * len(all_img_list)\n env = lmdb.open(lmdb_save_path, map_size=data_size * 10)\n\n #### write data to lmdb\n pbar = util.ProgressBar(len(all_img_list))\n txn = env.begin(write=True)\n resolutions = []\n for idx, (path, key) in enumerate(zip(all_img_list, keys)):\n pbar.update('Write {}'.format(key))\n key_byte = key.encode('ascii')\n data = dataset[key] if read_all_imgs else cv2.imread(path, cv2.IMREAD_UNCHANGED)\n if data.ndim == 2:\n H, W = data.shape\n C = 1\n else:\n H, W, C = data.shape\n txn.put(key_byte, data)\n resolutions.append('{:d}_{:d}_{:d}'.format(C, H, W))\n if not read_all_imgs and idx % BATCH == 0:\n txn.commit()\n txn = env.begin(write=True)\n txn.commit()\n env.close()\n print('Finish writing lmdb.')\n\n #### create meta information\n # check whether all the images are the same size\n assert len(keys) == len(resolutions)\n if len(set(resolutions)) <= 1:\n meta_info['resolution'] = [resolutions[0]]\n meta_info['keys'] = keys\n print('All images have the same resolution. Simplify the meta info.')\n else:\n meta_info['resolution'] = resolutions\n meta_info['keys'] = keys\n print('Not all images have the same resolution. 
Save meta info for each image.')\n\n pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'), \"wb\"))\n print('Finish creating lmdb meta info.')", "def process_images(path, dataset):\n \n print(f\"Processing images {os.path.join(path, dataset)}\", flush=True)\n label_file = os.path.join(path, dataset + '-labels-idx1-ubyte')\n with open(label_file, 'rb') as file:\n _, num = struct.unpack(\">II\", file.read(8))\n labels = numpy.fromfile(file, dtype=numpy.int8) #int8\n new_labels = numpy.zeros((num, 10))\n new_labels[numpy.arange(num), labels] = 1\n\n img_file = os.path.join(path, dataset + '-images-idx3-ubyte')\n with open(img_file, 'rb') as file:\n _, num, rows, cols = struct.unpack(\">IIII\", file.read(16))\n imgs = numpy.fromfile(file, dtype=numpy.uint8).reshape(num, rows, cols) #uint8\n imgs = imgs.astype(numpy.float32) / 255.0\n\n os.remove(label_file); os.remove(img_file)\n print(f\"Saving files under {os.path.join(path, dataset)} path\", flush=True)\n numpy.savez_compressed(os.path.join(path, dataset), imgs=imgs, labels=labels)", "def REDS(mode):\n #### configurations\n read_all_imgs = False # whether real all images to memory with multiprocessing\n # Set False for use limited memory\n BATCH = 5000 # After BATCH images, lmdb commits, if read_all_imgs = False\n if mode == 'train_sharp':\n img_folder = '../../datasets/REDS/train_sharp'\n lmdb_save_path = '../../datasets/REDS/train_sharp_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_sharp_bicubic':\n img_folder = '../../datasets/REDS/train_sharp_bicubic'\n lmdb_save_path = '../../datasets/REDS/train_sharp_bicubic_wval.lmdb'\n H_dst, W_dst = 180, 320\n elif mode == 'train_blur_bicubic':\n img_folder = '../../datasets/REDS/train_blur_bicubic'\n lmdb_save_path = '../../datasets/REDS/train_blur_bicubic_wval.lmdb'\n H_dst, W_dst = 180, 320\n elif mode == 'train_blur':\n img_folder = '../../datasets/REDS/train_blur'\n lmdb_save_path = '../../datasets/REDS/train_blur_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_blur_comp':\n img_folder = '../../datasets/REDS/train_blur_comp'\n lmdb_save_path = '../../datasets/REDS/train_blur_comp_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_sharp_flowx4':\n img_folder = '../../datasets/REDS/train_sharp_flowx4'\n lmdb_save_path = '../../datasets/REDS/train_sharp_flowx4.lmdb'\n H_dst, W_dst = 360, 320\n n_thread = 40\n ########################################################\n if not lmdb_save_path.endswith('.lmdb'):\n raise ValueError(\"lmdb_save_path must end with \\'lmdb\\'.\")\n if osp.exists(lmdb_save_path):\n print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))\n sys.exit(1)\n\n #### read all the image paths to a list\n print('Reading image path list ...')\n all_img_list = data_util._get_paths_from_images(img_folder)\n keys = []\n for img_path in all_img_list:\n split_rlt = img_path.split('/')\n folder = split_rlt[-2]\n img_name = split_rlt[-1].split('.png')[0]\n keys.append(folder + '_' + img_name)\n\n if read_all_imgs:\n #### read all images to memory (multiprocessing)\n dataset = {} # store all image data. 
list cannot keep the order, use dict\n print('Read images with multiprocessing, #thread: {} ...'.format(n_thread))\n pbar = util.ProgressBar(len(all_img_list))\n\n def mycallback(arg):\n '''get the image data and update pbar'''\n key = arg[0]\n dataset[key] = arg[1]\n pbar.update('Reading {}'.format(key))\n\n pool = Pool(n_thread)\n for path, key in zip(all_img_list, keys):\n pool.apply_async(read_image_worker, args=(path, key), callback=mycallback)\n pool.close()\n pool.join()\n print('Finish reading {} images.\\nWrite lmdb...'.format(len(all_img_list)))\n\n #### create lmdb environment\n data_size_per_img = cv2.imread(all_img_list[0], cv2.IMREAD_UNCHANGED).nbytes\n print('data size per image is: ', data_size_per_img)\n data_size = data_size_per_img * len(all_img_list)\n env = lmdb.open(lmdb_save_path, map_size=data_size * 10)\n\n #### write data to lmdb\n pbar = util.ProgressBar(len(all_img_list))\n txn = env.begin(write=True)\n for idx, (path, key) in enumerate(zip(all_img_list, keys)):\n pbar.update('Write {}'.format(key))\n key_byte = key.encode('ascii')\n data = dataset[key] if read_all_imgs else cv2.imread(path, cv2.IMREAD_UNCHANGED)\n if 'flow' in mode:\n H, W = data.shape\n assert H == H_dst and W == W_dst, 'different shape.'\n else:\n H, W, C = data.shape\n assert H == H_dst and W == W_dst and C == 3, 'different shape.'\n txn.put(key_byte, data)\n if not read_all_imgs and idx % BATCH == 0:\n txn.commit()\n txn = env.begin(write=True)\n txn.commit()\n env.close()\n print('Finish writing lmdb.')\n\n #### create meta information\n meta_info = {}\n meta_info['name'] = 'REDS_{}_wval'.format(mode)\n channel = 1 if 'flow' in mode else 3\n meta_info['resolution'] = '{}_{}_{}'.format(channel, H_dst, W_dst)\n meta_info['keys'] = keys\n pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'), \"wb\"))\n print('Finish creating lmdb meta info.')", "def execute(self, images_and_density_maps):\n # these imports are used in eval(), don't remove them\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n _ = cca_out, cca_trans\n\n op_str = self._get_op_str()\n const_str = self._get_const_str()\n\n for image_and_density_map in images_and_density_maps:\n rand_str = self._get_rand_str()\n args_str = \",\".join([const_str, rand_str]) if const_str and rand_str else const_str + rand_str\n op = eval(f\"{op_str}({args_str})\")\n for result in op.execute([image_and_density_map]):\n yield result", "def query(self, images):\n if self.pool_size == 0: # if the buffer size is 0, do nothing\n return images\n return_images = []\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer\n self.num_imgs = self.num_imgs + 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer\n random_id = random.randint(0, self.pool_size - 1) # randint is inclusive\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else: # by another 50% chance, the buffer will return the current image\n return_images.append(image)\n return_images = torch.cat(return_images, 0) # collect all the images and return\n return return_images", "def execute_augmentation(queue_images: Queue, progress: tqdm, output: str, factor: int) -> None:\n while not 
queue_images.empty():\n element = queue_images.get(block=False)\n augmentation(element, output, factor)\n progress.update(1)", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def process_batch(self, image_batch):\n images = []\n for image_data in image_batch:\n image_resize = cv2.resize(image_data, (0,0), fx=0.5, fy=0.5) #NOTE\n images.append(image_resize)\n\n return np.array(images)", "def load_preprocess_images(image_paths: List[str], image_size: tuple) -> List[np.ndarray]:\n image_size = image_size[1:] # we do not need the number of channels\n images = []\n for image_path in image_paths:\n images.append(load_preprocess_image(image_path, image_size))\n return images", "def _compute_var_image_numpy_parallel(\n src_var_image: np.ndarray,\n dst_src_ij_images: np.ndarray,\n dst_var_image: np.ndarray\n):\n dst_height = dst_var_image.shape[-2]\n for dst_j in nb.prange(dst_height):\n _compute_var_image_for_dest_line(\n dst_j, src_var_image, dst_src_ij_images, dst_var_image\n )", "def process_next_image(self):\n if self.queue:\n next_queue_item = self.queue.popleft()\n if type(next_queue_item) == str:\n if next_queue_item == 'clear':\n self.signal_status_message.emit('Clearing ROI data (from request in image queue)')\n self.clear()\n return\n [image,file_id,image_num] = next_queue_item\n # print('image_num',image_num)\n # print('next image',self.next_image)\n self.signal_status_message.emit('Started processing ID {} Im {}'.format(file_id,image_num))\n image = image - self.emccd_bias # don't edit in place because this seemed to cause an issue with images not showing in GUI. 
Maybe not thread safe?\n # print('image min',np.min(image))\n # print('image max',np.max(image))\n image_num_too_big = False\n for group in self.roi_groups:\n for roi in group.rois:\n try:\n roi.counts[image_num][file_id] = image[roi.x:roi.x+roi.w,roi.y:roi.y+roi.h].sum()\n except IndexError: # image_num was not valid for the number of images that MAIA is expecting\n image_num_too_big = True\n if image_num_too_big:\n self.signal_status_message.emit('Image number {} is greater than max expected images, so this image has been ignored (most likely cause is rearrangement toggle).')\n self.signal_status_message.emit('Finished processing ID {} Im {}'.format(file_id,image_num))\n self.calculate_thresholds()", "def pair_images():\n # TODO: maybe implement some way to skip frames if queue is too long\n queue_a = xy_imgs\n queue_b = z_imgs\n if len(queue_a) == 0 or len(queue_b) == 0:\n return\n a_prev = None\n b_prev = None\n a = queue_a[0]\n b = queue_b[0]\n if a.ts < b.ts:\n while a.ts < b.ts:\n a_prev = queue_a.popleft()\n if len(queue_a) == 0:\n if b.within_threshold(a_prev):\n yield process_images(a_prev, b)\n return\n a = queue_a[0]\n closest_a = b.closest_to(a, a_prev)\n if closest_a is not None:\n yield process_images(closest_a, b)\n else:\n while b.ts < a.ts:\n b_prev = queue_b.popleft()\n if len(queue_b) == 0:\n if a.within_threshold(b_prev):\n yield process_images(a, b_prev)\n return\n b = queue_b[0]\n closest_b = a.closest_to(b, b_prev)\n if closest_b is not None:\n yield process_images(a, closest_b)" ]
[ "0.7019778", "0.701473", "0.68229294", "0.6802597", "0.67542833", "0.6570712", "0.6545006", "0.65141946", "0.6469355", "0.6455748", "0.6437736", "0.6373732", "0.6337872", "0.6320698", "0.6302606", "0.6255835", "0.6229472", "0.62148833", "0.6202636", "0.61866295", "0.61764526", "0.6150914", "0.61422175", "0.6139899", "0.61092854", "0.6103789", "0.6096633", "0.6084338", "0.6081984", "0.6056349", "0.60544425", "0.60125476", "0.6011628", "0.6003218", "0.5985592", "0.5985277", "0.59809476", "0.5966458", "0.5962644", "0.5954697", "0.5952728", "0.5945884", "0.59340256", "0.5914991", "0.5910856", "0.5903178", "0.5888857", "0.58792806", "0.58637893", "0.5862652", "0.5846829", "0.5844264", "0.5836815", "0.58289874", "0.5828507", "0.5827354", "0.58261764", "0.5823627", "0.58176905", "0.5815725", "0.5812806", "0.58059716", "0.58056295", "0.58014864", "0.5799791", "0.57885695", "0.57799655", "0.5779944", "0.57715267", "0.5769696", "0.5757933", "0.5754358", "0.57370317", "0.5736786", "0.5733541", "0.5727277", "0.5724785", "0.57194847", "0.57165354", "0.57104695", "0.57073003", "0.5702725", "0.57012105", "0.569182", "0.56909305", "0.56787676", "0.56767887", "0.56750906", "0.56750816", "0.56702733", "0.5659374", "0.56571764", "0.5652742", "0.5648781", "0.5643737", "0.56334025", "0.5629055", "0.5619187", "0.56186384", "0.5611631" ]
0.56445336
94
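Editor's note, not part of the dataset: each record in this dump keeps its candidate documents as single escaped strings (literal \n and \t sequences), followed by a list of per-negative scores kept as strings and the two summary values above. The following is a minimal inspection sketch under the assumption that one record has been loaded as a Python dict; the field names used here ("negatives", "negative_scores") are assumptions for illustration, not guaranteed keys.

def unescape_code(stored):
    # The dump shows code as one long line with literal "\n" / "\t" escapes;
    # turn those back into real whitespace so the snippet is readable.
    return stored.replace("\\n", "\n").replace("\\t", "\t")


def hardest_negative(record):
    # Pair each stored negative with its score (scores are strings in the dump)
    # and return the highest-scoring one for a quick side-by-side check.
    scored = zip(record["negatives"], (float(s) for s in record["negative_scores"]))
    text, score = max(scored, key=lambda pair: pair[1])
    return score, unescape_code(text)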
Version of process_image with all of the local configuration variables packed in. Also encapsulates the opening of the image.
def _process_image_local(raw_image_path): return process_image( original_rgb_image=raw.open.as_rgb(raw_image_path), original_image_filepath=raw_image_path, raw_images_dir=raw_images_dir, ROI_definitions=ROI_definitions, flat_field_filepath_or_none=flat_field_filepath_or_none, save_ROIs=save_ROIs, save_dark_frame_corrected_image=save_dark_frame_corrected_images, save_flat_field_corrected_image=save_flat_field_corrected_images, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
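Editor's note, not part of the dataset: the document field of the record above shows a common pattern, a local wrapper that closes over module-level configuration (raw_images_dir, ROI_definitions, the flat-field path, save flags) so the per-image entry point takes only a raw image path. Below is a minimal sketch of the same pattern using functools.partial; process_image, open_as_rgb and the keyword names are stand-ins inferred from that record, not a verified API. The negatives and scores for this record follow after the sketch.

from functools import partial


def process_image(original_rgb_image, original_image_filepath, raw_images_dir,
                  ROI_definitions, flat_field_filepath_or_none=None, save_ROIs=False):
    # Placeholder standing in for the real processing routine.
    ...


def make_local_processor(open_as_rgb, **local_config):
    # Bind everything except the raw image path up front, mirroring the
    # _process_image_local wrapper in the record above.
    configured = partial(process_image, **local_config)

    def _process_image_local(raw_image_path):
        return configured(
            original_rgb_image=open_as_rgb(raw_image_path),
            original_image_filepath=raw_image_path,
        )

    return _process_image_local

# Example wiring (all values hypothetical):
# process_one = make_local_processor(open_as_rgb=my_raw_loader,
#                                    raw_images_dir="/data/raw",
#                                    ROI_definitions=my_rois)
# process_one("/data/raw/img_0001.dng")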
[ "def process(image):\n pass", "def process_image(self):\n pass", "def process(self, image):", "def process(self, image: np.ndarray) -> NamedTuple:\n\n return super().process(input_data={'image': image})", "def add_processed_image(image_proc_type, name, b64_string, export_file_type):\n\n if image_proc_type == \"contrast stretching\":\n info = process_contrast_stretch(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with contrast stretching')\n\n if image_proc_type == \"adaptive equalization\":\n info = process_adapt_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with adaptive equalization')\n\n if image_proc_type == \"histogram equalization\":\n info = process_histogram_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with histogram equalization')\n\n if image_proc_type == \"reverse video\":\n info = process_reverse_image(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with reverse image')\n\n if image_proc_type == \"log compression\":\n info = process_log_compression(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with log compression')\n\n return jsonify(\"it worked\")", "def __make_processing(self, img_name, abspath_dir_img, id_foot):\n data = {}\n data['data'] = ImageInfo.get_date(abspath_dir_img)\n data['total_part'] = TOTAL_PART\n data['nuvens'] = ImageInfo.get_cloud(abspath_dir_img)\n self.__make_tms(abspath_dir_img)\n data['geom'] = self.__make_footprint(abspath_dir_img, shp_out=id_foot)\n abspath_rgb, img_name_rgb = ImageInfo.get_image_rgb(\n abspath_dir_img, img_name\n )\n data['tms'] = ImageInfo.get_xml_tms(img_name_rgb)\n data['image'] = img_name_rgb\n data['quicklook'] = self.__make_png(abspath_rgb)\n data['path'] = ImageInfo.get_path(img_name)\n return data", "def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not 
os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)", "def _build_final_image(self, image):\n raise NotImplementedError", "def make_image(self):\n\n if self.type == 'passthrough':\n return\n render_template(\n os.path.dirname(self.main_module),\n os.path.basename(self.main_module_path),\n self.language,\n self.requirements,\n self.whitelist,\n self.type,\n into=self.code_dir)\n self.build()", "def process(\n self,\n image: np.array\n ) -> np.array:\n pass", "def process_image_debug(self, image):\n return self.process_image(image, debug=True)", "def processImage(currentImage):\n\tprint currentImage + ' is the current image and is being processed...'\n\tstackImage(currentImage, overlayImage, processAllPictures)", "def process_image(encoded_image, config, thread_id=0):\r\n return image_processing.process_image(encoded_image,\r\n is_training=False,\r\n height=config.image_height,\r\n width=config.image_width,\r\n thread_id=thread_id,\r\n image_format=config.image_format)", "def process_images(self):\n self.processed_content_image = tf.keras.applications.vgg19.preprocess_input(\n self.content_image)\n self.processed_style_image = tf.keras.applications.vgg19.preprocess_input(\n self.style_image)", "def process_image(self, **kwargs):\n try:\n img = self.current_image\n\n if self.is_vis:\n result = self._process_job_vis(img, **kwargs)\n elif self.is_nir:\n result = self._process_job_nir(img, **kwargs)\n elif self.is_fluo:\n result = self._process_job_fluo(img, **kwargs)\n else:\n raise NotImplementedError\n\n except Exception as e:\n print(\n 'Failed to process image: \"{}\", because \"{}\"'.format(\n self.file_path, repr(e)\n )\n )\n self.print_images()\n return False\n\n self.print_images()\n\n return result", "def process(self):\n return self.output_image", "def build_container_image(self) -> None:\n print_version_of_tools()\n try:\n self.fs_watcher.start()\n runner = PluginsRunner(self,\n self.plugins_conf,\n self.plugin_files,\n self.keep_plugins_running,\n plugins_results=self.data.plugins_results)\n runner.run()\n finally:\n self.fs_watcher.finish()", "def main():\n time_start = perf_counter()\n\n args = parse_args(sys.argv[1:]).ordered()\n _, opts = next(args)\n log_level = 0\n try:\n log_level = (0, 20, 10)[opts.verbosity]\n mpl_log_level = log_level + 10 if log_level > 0 else log_level\n except IndexError:\n log_level = 10\n mpl_log_level = log_level\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n # set level for all loggers\n # separate log level for matplotlib because it's so verbose\n for logger in loggers:\n if logger.name.startswith(\"matplotlib\"):\n logger.setLevel(mpl_log_level)\n else:\n logger.setLevel(log_level)\n\n LOG.debug(\"Program opts:\\n%s\", pformat(vars(opts)))\n\n # main vars\n inputs = []\n processed = []\n # im: Optional[Image.Image] = None\n im: Image.Image | np.ndarray | None = None\n in_file_path: Optional[str]\n in_image_size = Size(0, 0)\n in_file_size = 0\n in_dpi = 0\n in_exif: Optional[dict] = None\n out_exif: bytes = b\"\"\n out_exif_size = 0\n out_file_path = None\n out_image_size = Size(0, 0)\n out_file_size = 0\n no_op = False\n\n for cmd, arg in args:\n LOG.debug(\"Processing command %s with args:\\n%s\", cmd, pformat(vars(arg)))\n\n if cmd == \"open\":\n in_file_path = arg.input.name\n in_file_size = os.path.getsize(in_file_path) # type: ignore\n im = Image.open(arg.input)\n in_image_size 
= Size(*im.size)\n LOG.info(\"Input dims: %s\", in_image_size)\n try:\n in_exif = piexif.load(in_file_path)\n del in_exif[\"thumbnail\"]\n # LOG.debug(\"Exif: %s\", in_exif)\n in_dpi = im.info[\"dpi\"]\n except KeyError:\n pass\n LOG.info(\"Input file size: %s\", humanize_bytes(in_file_size))\n LOG.info(\"Input dpi: %s\", in_dpi)\n if arg.show_histogram:\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n im = cv2.cvtColor(np.asarray(im), cv2.COLOR_RGB2BGR)\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"open2\":\n # Test of opening multiple images for some operations, such as matting\n for item in arg.input:\n _im = Image.open(item)\n try:\n ex = piexif.load(item.name)\n dpi = _im.info[\"dpi\"]\n del ex[\"thumbnail\"]\n except KeyError:\n ex = None\n dpi = (0, 0)\n _im = np.asarray(_im)\n _im = cv2.cvtColor(_im, cv2.COLOR_RGB2BGR)\n inputs.append(\n Img(\n _im,\n file_path=item.name,\n dpi=dpi,\n exif=ex,\n )\n )\n LOG.debug(\"Imgs: %s\", inputs)\n im = inputs[0].data\n in_file_path = inputs[0].file_path\n in_file_size = inputs[0].file_size\n in_image_size = inputs[0].size\n if arg.show_histogram:\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"mat\":\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n im = mat.create_mat(im, size_inches=arg.size)\n out_image_size = Size.from_np(im)\n elif cmd == \"resize\":\n im = Image.fromarray(im) if type(im) == np.ndarray else im\n if is_ndarray(im) or im is None:\n raise TypeError('Expected Image, not ndarray')\n orig_size = Size(*im.size) # type: ignore\n out_image_size = orig_size\n try:\n resize_method, new_size = resize.get_method(\n orig_size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n else:\n # Resize/resample\n try:\n im = resize.resize(\n resize_method,\n im,\n new_size,\n )\n except ImageTooSmallError as e:\n LOG.warning(e)\n out_image_size = Size(*im.size) # type: ignore\n elif cmd == \"resize2\":\n for item in inputs:\n try:\n resize_method, new_size = resize.get_method(\n item.size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n force=arg.force,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n except ResizeAttributeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n else:\n try:\n _im = resize.resize_opencv(\n resize_method, item.data, new_size, resample=cv2.INTER_AREA\n )\n if _im is not None:\n processed.append(Img(_im))\n else:\n LOG.error('Expected image from resize_opencv(), got None')\n except ImageTooSmallError as e:\n LOG.warning(e)\n LOG.info(processed)\n out_image_size = processed[0].size\n im = processed[0].data\n elif cmd == \"text\":\n if im is None:\n LOG.error('Image is None')\n return\n im = watermark.with_text(\n im,\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n exif=in_exif,\n ) # type: ignore\n elif cmd == \"text2\":\n im = watermark.with_text(\n Image.fromarray(im),\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n 
exif=in_exif,\n )\n im = np.asarray(im)\n elif cmd == \"watermark\":\n im = watermark.with_image(\n im,\n Image.open(arg.image),\n scale=arg.scale,\n position=arg.position,\n padding=arg.margin,\n opacity=arg.opacity,\n invert=arg.invert,\n )\n elif cmd == \"watermark2\":\n watermark_image = cv2.imread(arg.image.name, cv2.IMREAD_UNCHANGED)\n # im = watermark.with_image_opencv(\n # im,\n # watermark_image,\n # scale=arg.scale,\n # position=arg.position,\n # opacity=arg.opacity,\n # padding=arg.margin,\n # )\n try:\n im = watermark.overlay_transparent(\n im,\n watermark_image,\n scale=arg.scale,\n padding=arg.margin,\n position=arg.position,\n alpha=arg.opacity,\n invert=arg.invert,\n )\n except OverlaySizeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n elif cmd == \"sharpen\":\n im = sharpen.unsharp_mask(im, amount=arg.amount, threshold=arg.threshold)\n elif cmd == \"save\":\n # if type(im) == np.ndarray:\n # im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n use_progressive_jpg = in_file_size > 10000\n if use_progressive_jpg:\n LOG.debug(\"Large file; using progressive jpg\")\n\n # Exif\n if arg.keep_exif:\n out_exif = piexif.dump(piexif.load(in_file_path))\n out_exif_size = sys.getsizeof(out_exif)\n\n outbuf = BytesIO()\n try:\n im.save(\n outbuf,\n \"JPEG\",\n quality=arg.jpg_quality,\n dpi=in_dpi,\n progressive=use_progressive_jpg,\n optimize=True,\n exif=out_exif,\n )\n except AttributeError:\n write_params = [\n cv2.IMWRITE_JPEG_QUALITY,\n arg.jpg_quality,\n cv2.IMWRITE_JPEG_OPTIMIZE,\n ]\n if use_progressive_jpg:\n write_params += [\n cv2.IMWRITE_JPEG_PROGRESSIVE,\n ]\n _, buf = cv2.imencode(\".jpg\", im, write_params)\n outbuf = BytesIO(buf)\n image_buffer = outbuf.getbuffer()\n out_file_size = image_buffer.nbytes + out_exif_size\n LOG.info(\"Buffer output size: %s\", humanize_bytes(out_file_size))\n\n if arg.output is None:\n root, _ = os.path.splitext(in_file_path)\n out_file_path = f\"{root}{arg.suffix}.jpg\"\n else:\n out_file_path = arg.output.name\n\n if arg.no_op:\n no_op = True\n continue\n LOG.info(\"Saving buffer to %s\", out_file_path)\n if (out_path := Path(out_file_path)).exists():\n if not arg.force:\n LOG.critical(\n \"file '%s' exists and force argument not found\", out_path\n )\n print(\n f\"{fg.red}{ef.bold}Error: file '{out_path}' exists;\",\n f\" use -f option to force overwrite.{rs.all}\",\n file=sys.stderr,\n )\n return\n # Create output dir if it doesn't exist\n out_path.parent.mkdir(parents=True, exist_ok=True)\n\n with out_path.open(\"wb\") as f:\n f.write(image_buffer)\n if arg.keep_exif:\n piexif.insert(out_exif, out_file_path)\n out_file_size = os.path.getsize(out_file_path)\n\n elapsed = perf_counter() - time_start\n report = generate_report(\n in_file_size,\n out_file_size,\n in_file_path,\n out_file_path,\n in_image_size,\n out_image_size,\n elapsed,\n no_op,\n )\n print(report)", "def _preprocess_fn(data):\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data", "def __loadImage(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'image':\n if 
pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following image file is missing: %s \" % fileName)\n\n file = QFile(fileName)\n if not file.open(QIODevice.ReadOnly):\n raise Exception(\"error opening image file %s\" % fileName )\n else:\n imageData= file.readAll()\n pr['value'] = \"undefined:/%s\" % base64.b64encode(imageData)\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following image file is missing: %s \" % fileName)\n \n file = QFile(fileName)\n if not file.open(QIODevice.ReadOnly):\n raise Exception(\"error opening image file %s\" % fileName )\n else:\n imageData= file.readAll()\n pr['value'] = \"local-tests:/%s\" % base64.b64encode(imageData)\n else:\n pass", "def __processImage(self, f):\n try:\n imgobj = Image.open(f).convert('RGB')\n except:\n return None\n w, h = imgobj.size\n if w < h:\n # reduce width to required dimension and adjust height accordingly\n new_h = int(h * self.PROCESSING_DIM / w)\n resizedImg = imgobj.resize((self.PROCESSING_DIM, new_h))\n\n y_start = int(new_h / 2 - self.PROCESSING_DIM / 2)\n processedImage = resizedImg.crop((0, y_start, self.PROCESSING_DIM, y_start + self.PROCESSING_DIM))\n\n else:\n # reduce height to required dimension and adjust width accordingly\n new_w = int(w * self.PROCESSING_DIM / h)\n resizedImg = imgobj.resize((new_w, self.PROCESSING_DIM))\n\n x_start = int(new_w / 2 - self.PROCESSING_DIM / 2)\n processedImage = resizedImg.crop((x_start, 0, x_start + self.PROCESSING_DIM, self.PROCESSING_DIM))\n\n return processedImage", "def multi_run_wrapper(args):\n\treturn img_preprocessing(*args)", "def imageProcessing():\n\n # Parser initialization\n parser = argparse.ArgumentParser(description=colourers.toCyan('Image processor for reading/writing images into BMP/PNG formats and applying transformations on it.'))\n \n # Formats Parser\n group = parser.add_argument_group(colourers.toGreen('formats'))\n formatParser = group.add_mutually_exclusive_group(required=True)\n formatParser.add_argument('--bmp',\n type=str,\n metavar=colourers.toRed('<bmp file name>'), \n help=colourers.toMagenta('bmp file to parse'))\n formatParser.add_argument('--png',\n type=str,\n metavar=colourers.toRed('<png file name>'),\n help=colourers.toMagenta('png file to parse'))\n\n # Printers Parser\n group = parser.add_argument_group(colourers.toYellow('printers'))\n printers = group.add_mutually_exclusive_group()\n printers.add_argument('--header',\n help=colourers.toMagenta('print the file format header'),\n action='store_true')\n printers.add_argument('--print-color',\n '-pc',\n type=int,\n nargs=2,\n metavar=(colourers.toRed('<width>'), colourers.toRed('<height>')),\n help=colourers.toMagenta('pixel to print'))\n printers.add_argument('--histogram',\n action='store_true',\n help=colourers.toMagenta('print histogram associated'))\n printers.add_argument('--output',\n '-o',\n type=str,\n metavar=colourers.toRed('<output file>'),\n help=colourers.toMagenta('image output file'))\n\n # Transformers Parser\n transformers = parser.add_argument_group(colourers.toBlue('transformers'))\n transformers.add_argument('--half',\n action='store_true',\n help='applying the filter on one half of the image')\n transformers.add_argument('--rotate',\n '-r',\n type=int,\n choices=[90, 180, 270],\n metavar=colourers.toRed('<degree of rotation>'),\n 
help=colourers.toMagenta('rotate the image'))\n transformers.add_argument('--scale',\n '-s',\n type=int,\n nargs='+',\n action=required_length(1, 2),\n metavar=(colourers.toRed('<scaleRatio> | [<width>'), colourers.toRed('<height>')),\n help=colourers.toMagenta('scale/shrink the image'))\n transformers.add_argument('--contrast',\n '-c',\n type=float,\n metavar=colourers.toRed('<contrast factor>'),\n help=colourers.toMagenta('apply a factor contrast'))\n transformers.add_argument('--grayscale',\n '-gs',\n action='store_true',\n help=colourers.toMagenta('to grayscale image'))\n transformers.add_argument('--binary',\n '-b',\n action='store_true',\n help=colourers.toMagenta('to binary image'))\n transformers.add_argument('--invert',\n '-i',\n action='store_true',\n help=colourers.toMagenta('to inverted image, equivalent to --contrast -1'))\n transformers.add_argument('--channel',\n type=str,\n choices=['blue', 'green', 'red'],\n metavar=colourers.toRed('<channel>'),\n nargs='+',\n action=required_length(1, 2),\n help=colourers.toMagenta('to the specified channel'))\n \n # Filters Parser\n filters = parser.add_argument_group(colourers.toCyan('filters'))\n filters.add_argument('--edge-detection',\n '-ed',\n type=str,\n choices=['canny', 'sobel', 'prewitt', 'roberts', 'kirsch'],\n metavar=colourers.toRed('<filter name>'),\n help=colourers.toMagenta('perform an edge detection'))\n filters.add_argument('--retrieve-color',\n '-rv',\n action='store_true',\n help=colourers.toMagenta('retrieve the colors of a grayscale image'))\n filters.add_argument('--edge-enhancement',\n '-ee',\n action='store_true', \n help=colourers.toMagenta('applying increased edge enhancement filter'))\n filters.add_argument('--sharpen',\n action='store_true',\n help=colourers.toMagenta('sharpening the image'))\n filters.add_argument('--unsharp',\n action='store_true',\n help=colourers.toMagenta('unsharp the image')) \n filters.add_argument('--denoise',\n action='store_true',\n help=colourers.toMagenta('denoise the image'))\n filters.add_argument('--texture-detection',\n '-td',\n action='store_true',\n help=colourers.toMagenta('applying texture detection (Gabor Filter)'))\n filters.add_argument('--blur',\n type=str,\n choices=['simple', 'more', 'average', 'gaussian', 'motion'],\n metavar=colourers.toRed('<type of blur>'),\n help=colourers.toMagenta('perform the selected blur'))\n filters.add_argument('--blur-iteration',\n '-bi',\n type=int,\n default=1,\n metavar=colourers.toRed('<number of iteration>'),\n help=colourers.toMagenta('apply N times the blur function'))\n filters.add_argument('--emboss',\n action='store_true',\n help=colourers.toMagenta('perform an embossing filter'))\n filters.add_argument('--overlap',\n type=str,\n nargs='+',\n metavar=colourers.toRed('<image to overlap>'),\n help=colourers.toMagenta('overlap an image given on the selected image'))\n\n # Args parsing\n args = parser.parse_args()\n\n filename = \"\"\n # BMP Block\n if args.bmp:\n filename = args.bmp\n\n if not os.path.isfile(filename):\n colourers.error('\"{}\" does not exist !'.format(filename))\n sys.exit(-1)\n colourers.success('Success Opening {}...'.format(filename))\n\n bmp = BMP(filename)\n half = args.half\n\n if args.print_color:\n width, height = args.print_color\n colourers.info(f'Printing pixel color of ({width}, {height})')\n Printers.printPixel(bmp, width, height)\n sys.exit(0)\n \n elif args.header:\n colourers.info(f'Printing BMP header of {bmp.filename}')\n Printers.printHeader(bmp)\n sys.exit(0)\n \n elif args.histogram:\n 
colourers.info(f'Printing color histogram of {bmp.filename}')\n Printers.printHistogram(bmp)\n sys.exit(0)\n \n if (args.rotate or args.scale or args.contrast or args.grayscale or \n args.binary or args.channel or args.edge_detection or args.retrieve_color or\n args.edge_enhancement or args.blur or args.emboss or args.overlap or args.texture_detection or\n args.denoise or args.sharpen or args.unsharp):\n if not hp.atLeastOne(args.output, (\n args.rotate,\n args.scale,\n args.contrast,\n args.grayscale,\n args.binary,\n args.channel,\n args.edge_detection,\n args.retrieve_color,\n args.edge_enhancement,\n args.blur,\n args.emboss,\n args.overlap,\n args.texture_detection,\n args.denoise,\n args.sharpen,\n args.unsharp\n )):\n parser.error('--rotate/--scale/--contrast/--grayscale/--binary/--channel/--edge-detection/--retrieve-color/--edge-enhancement/--blur/--emboss/--overlap/--texture-detection/--denoise/--sharpen/--unsharp and --output must be given together')\n \n if args.rotate:\n degree = args.rotate\n colourers.info(f'Rotating image to {degree} degree')\n bmp.imageData = Transformers.rotate(bmp, degree)\n\n if args.scale:\n if len(args.scale) == 2:\n width, height = args.scale\n colourers.info(f'Scaling image to {width}x{height} pixels')\n bmp.imageData = Transformers.scale(bmp, height, width)\n else:\n scaleRatio = args.scale[0]\n\n colourers.info(f'Scaling image to {scaleRatio} scale ratio')\n\n height = int(hp.readLittleEndian(bmp.height))\n width = int(hp.readLittleEndian(bmp.width))\n\n bmp.imageData = Transformers.scale(bmp, height * scaleRatio, width * scaleRatio)\n \n if args.contrast:\n factor = args.contrast\n colourers.info(f'Applying a factor contrast of {factor}')\n bmp.imageData = Transformers.contrast(bmp, factor)\n \n if args.grayscale:\n colourers.info(f'Applying grayscale mask to the image')\n bmp.imageData = Transformers.grayscale(bmp, half)\n \n if args.binary:\n colourers.info(f'Applying binary mask to the image')\n bmp.imageData = Transformers.binary(bmp, half)\n \n if args.invert:\n colourers.info(f'Inverting image colours')\n bmp.imageData = Transformers.invert(bmp, half)\n \n if args.channel:\n if len(args.channel) == 2:\n c1, c2 = args.channel\n colourers.info(f'Keeping only {c1} and {c2} channels of the image')\n bmp.imageData = Transformers.toChannel(bmp, [c1, c2], half)\n else:\n channel = args.channel[0]\n colourers.info(f'Keeping only {channel} channel of the image')\n bmp.imageData = Transformers.toChannel(bmp, channel, half)\n \n if args.denoise:\n colourers.info(f'Denoising the image')\n bmp.imageData = Filters.wienerFilter(bmp.imageData, gaussianKernel(9, sigma=0.33), K=10)\n \n if args.texture_detection:\n colourers.info(f'Applying texture detection (Gabor Filter)')\n bmp.imageData = Filters.gaborFilter(bmp.imageData, gaborKernel(0))\n \n if args.edge_enhancement:\n colourers.info(f'Applying increased edge enhancement filter')\n bmp.imageData = Filters.iee(bmp.imageData)\n\n if args.edge_detection:\n filterName = args.edge_detection\n if filterName == 'canny':\n colourers.info(f'Performing Canny filter for edge detection')\n bmp.imageData = Filters.ced(bmp.imageData, sigma=0.33, kernelSize=9, weakPix=50)\n if filterName == 'sobel':\n colourers.info(f'Performing Sobel filter for edge detection')\n bmp.imageData = Filters.sed(bmp.imageData, sigma=0.33, kernelSize=9)\n if filterName == 'prewitt':\n colourers.info(f'Performing Prewitt filter for edge detection')\n bmp.imageData = Filters.ped(bmp.imageData, sigma=0.33, kernelSize=9)\n if filterName == 
'roberts':\n colourers.info(f'Performing Roberts filter for edge detection')\n bmp.imageData = Filters.red(bmp.imageData, sigma=0.33, kernelSize=9)\n if filterName == 'kirsch':\n colourers.info(f'Performing Kirsch filter for edge detection')\n bmp.imageData = Filters.ked(bmp.imageData, sigma=0.33, kernelSize=9)\n\n if args.sharpen:\n colourers.info(f'Sharpening the image')\n bmp.imageData = Filters.sharpen(bmp.imageData)\n \n if args.unsharp:\n colourers.info(f'Unsharpening the image')\n bmp.imageData = Filters.unsharp(bmp.imageData)\n\n if args.retrieve_color:\n colourers.info(f'Retrieving color')\n bmp.imageData = Filters.retrieveColor(bmp.imageData)\n \n if args.blur:\n blurType = args.blur\n colourers.info(f'Performing a {blurType} blur')\n for _ in range(args.blur_iteration):\n blurFunc = Filters.blur.switcher.get(blurType)\n bmp.imageData = blurFunc(bmp.imageData)\n \n if args.emboss:\n colourers.info(f'Performing emboss filter')\n bmp.imageData = Filters.emboss(bmp.imageData)\n \n if args.overlap:\n overlappers = []\n for ov in args.overlap:\n overlappers.append(BMP(ov).imageData)\n colourers.info(f'Performing an overlapping between {bmp.filename} and {args.overlap}')\n bmp.imageData = Filters.overlap(bmp.imageData, overlappers)\n \n if args.output:\n outputFile = args.output\n hp.saveBMP(bmp, bmp.imageData, outputFile)\n colourers.success(f'Succesfully saved into {outputFile}')\n sys.exit(0)\n \n parser.error('Give at least one more argument')\n \n # PNG Block\n else:\n filename = args.png\n\n if not os.path.isfile(filename):\n print('\"{}\" does not exist'.format(filename), file=sys.stderr)\n sys.exit(-1)\n print('Success Opening {}...'.format(filename))\n \n png = PNG(filename)", "def __processImage(self):\n userName = getpass.getuser()\n pathImage = os.path.expanduser('~/StructureImage')\n imageFile = pathImage + '/structure.png'\n try:\n print('read the image')\n binaryImage = sciimage.imread(imageFile, True)\n except FileNotFoundError:\n print('The image file or the directory does not exist.')\n except:\n print('Other errors happen.')\n ySize, xSize = binaryImage.shape\n xPosition = []; yPosition = []\n for i in sp.arange(ySize):\n for j in sp.arange(xSize):\n if (binaryImage[i, j] == 0.0):\n yPosition.append(i)\n xPosition.append(j)\n xPosition = np.array(xPosition); yPosition = np.array(yPosition)\n xMin = xPosition.min(); xMax = xPosition.max()\n yMin = yPosition.min(); yMax = yPosition.max()\n #redefine the domain\n if (self.duplicateDomain == \"'no'\"):\n self.effectiveDomain = binaryImage[yMin:(yMax + 1), xMin:(xMax + 1)]\n elif (self.duplicateDomain == \"'yes'\"):\n tmpDomain = binaryImage[yMin:(yMax + 1), xMin:(xMax + 1)]\n xDirectionNum = int(input(\"Number of duplication in x direction: \"))\n yDirectionNum = int(input(\"Number of duplication in y direction: \"))\n self.effectiveDomain = self.__expandImageDomain(tmpDomain, xDirectionNum, \\\n yDirectionNum)\n yDimension, xDimension = self.effectiveDomain.shape\n self.effectiveDomain[:, 0] = 0.; self.effectiveDomain[:, -1] = 0.\n tmpBufferLayer = np.zeros(xDimension, dtype = np.float64)\n tmpBufferLayer[:] = 255.\n for i in sp.arange(40):\n if (i < 20):\n self.effectiveDomain = np.vstack((tmpBufferLayer, self.effectiveDomain))\n else:\n self.effectiveDomain = np.vstack((self.effectiveDomain, tmpBufferLayer))", "def process(self, _edObject=None):\n\n EDPluginExec.process(self)\n EDVerbose.DEBUG(\"EDPluginExecThumbnailv10.process\")\n\n# try:\n# except Exception:\n# edfImage = EDF(self.inputFilename)\n# self.npaImage = 
edfImage.GetData(0)\n\n# Read the image using FABIO\n isRGB = False\n pilOutputImage = None\n if self.inputFilename is not None:\n try:\n fabioImage = openimage(self.inputFilename)\n self.npaImage = fabioImage.data\n except Exception:\n pilInputImage = Image.open(self.inputFilename)\n x, y = pilInputImage.size\n ImageFile.MAXBLOCK = x * y\n if pilInputImage.mode == \"1\":\n self.npaImage = numpy.asarray(pilInputImage).astype(\"uint8\")\n isRGB = False\n elif pilInputImage.mode == \"F\":\n self.npaImage = numpy.asarray(pilInputImage)\n isRGB = False\n elif pilInputImage.mode == \"L\":\n self.npaImage = numpy.asarray(pilInputImage)\n isRGB = False\n elif pilInputImage.mode == \"P\":\n self.npaImage = numpy.asarray(pilInputImage.convert(\"RGB\"))\n isRGB = True\n elif pilInputImage.mode == \"RGB\":\n self.npaImage = numpy.asarray(pilInputImage)\n isRGB = True\n elif pilInputImage.mode == \"CMJK\":\n self.npaImage = numpy.asarray(pilInputImage.convert(\"RGB\"))\n isRGB = True\n\n dtype = self.npaImage.dtype\n NPAImageFloat = None\n\n# crop border\n if len(self.cropBorders) > 0:\n\n if len(self.cropBorders) == 1:\n crop0 = self.cropBorders[0]\n crop1 = self.cropBorders[0]\n else:\n crop0 = self.cropBorders[0]\n crop1 = self.cropBorders[1]\n if isRGB:\n self.npaImage = self.npaImage[crop0:-crop0, crop1:crop1, :]\n else:\n self.npaImage = self.npaImage[crop0:-crop0, crop1:crop1]\n\n\n# Set maxima and minima\n if (self.minLevelUnit is not None) or (self.maxLevelUnit is not None):\n sortedArray = self.npaImage.flatten()\n sortedArray.sort()\n\n if self.minLevel is not None:\n self.normalize = True\n if isRGB:\n EDVerbose.warning(\"It is not allowed to set Min with RGB data\")\n else:\n if self.minLevelUnit in [\"%\", \"percent\"]:\n self.minLevel = sortedArray[int(round(float(self.minLevel) * sortedArray.size / 100.0))]\n if isinstance(self.npaImage[0, 0], int):\n self.npaImage = numpy.maximum(self.npaImage, int(self.minLevel) * numpy.ones_like(self.npaImage))\n else:\n self.npaImage = numpy.maximum(self.npaImage, self.minLevel * numpy.ones_like(self.npaImage))\n\n if self.maxLevel is not None:\n self.normalize = True\n if isRGB:\n EDVerbose.warning(\"It is not allowed to set Max with RGB data\")\n else:\n if self.maxLevelUnit in [\"%\", \"percent\"]:\n self.maxLevel = sortedArray[int(round(float(self.maxLevel) * sortedArray.size / 100.0))]\n if isinstance(self.npaImage[0, 0], int):\n self.npaImage = numpy.minimum(self.npaImage, int(self.maxLevel) * numpy.ones_like(self.npaImage))\n else:\n self.npaImage = numpy.minimum(self.npaImage, self.maxLevel * numpy.ones_like(self.npaImage))\n\n# Scipy filters come here:\n if len(self.gaussianBlur) > 0:\n if len(self.gaussianBlur) == 1 :\n kernel = (self.gaussianBlur[0], self.gaussianBlur[0])\n else:\n kernel = (self.gaussianBlur[0], self.gaussianBlur[1])\n if isRGB:\n kernel = (kernel[0], kernel[1], 0)\n self.npaImage = scipy.ndimage.gaussian_filter(self.npaImage, kernel)\n\n if len(self.dilatation) > 0:\n if len(self.dilatation) == 1:\n kernel = (self.dilatation[0], self.dilatation[0])\n else:\n kernel = (self.dilatation[0], self.dilatation[1])\n if isRGB:\n kernel = (kernel[0], kernel[1], 0)\n self.npaImage = scipy.ndimage.morphology.grey_dilation(self.npaImage, kernel)\n\n\n#Normalization ; equalization\n if (self.normalize is True) or (self.equalize is True):\n if isRGB is True:\n self.npaImage = numpy.asarray(ImageOps.equalize(Image.fromarray(self.npaImage)))\n else:\n EDVerbose.DEBUG(\"EDPluginExecThumbnailv10: Normalization\")\n vmin = 
self.npaImage.min()\n vmax = self.npaImage.max()\n NPAImageFloat = (self.npaImage.astype(numpy.float32) - float(vmin)) / (float(vmax) - float(vmin))\n if (self.equalize == True):\n nbr_bins = 64\n NPAImageFloatFlat = NPAImageFloat.flatten()\n imhist, bins = numpy.histogram(NPAImageFloatFlat, nbr_bins, normed=True) #get image histogram\n cdf = imhist.cumsum() #cumulative distribution function\n ncdf = cdf / cdf[-1] #normalized cumulative distribution function\n# print ncdf\n NPAImageFloat2Flat = numpy.interp(NPAImageFloatFlat, bins, [0] + ncdf.tolist())\n NPAImageFloat = NPAImageFloat2Flat.reshape(NPAImageFloat.shape)\n EDVerbose.DEBUG(\"Equalize: min= %f, max= %f\" % (NPAImageFloat.min(), NPAImageFloat.max()))\n\n#Gamma and logarithm scale\n if ((self.log is True) or (self.gamma != 1)) and (NPAImageFloat is None): # then we need the array in float \n if dtype == numpy.uint8:\n NPAImageFloat = self.npaImage.astype(numpy.float32) / 255.0\n elif dtype == numpy.uint16:\n NPAImageFloat = self.npaImage.astype(numpy.float32) / 65535.0\n else:\n NPAImageFloat = self.npaImage.astype(numpy.float32)\n\n if self.log is True:\n NPAImageFloat = numpy.log(1 - NPAImageFloat.min() + NPAImageFloat)\n vmin = NPAImageFloat.min()\n vmax = NPAImageFloat.max()\n NPAImageFloat = (NPAImageFloat - vmin) / (vmax - vmin)\n\n if self.gamma != 1:\n if dtype not in [numpy.uint8, numpy.uint16]:\n vmin = NPAImageFloat.min()\n vmax = NPAImageFloat.max()\n NPAImageFloat = (NPAImageFloat - vmin) / (vmax - vmin)\n NPAImageInt = (255.0 * (NPAImageFloat ** self.gamma)).astype(\"uint8\")\n\n else: #if (self.gamma == 1):\n if NPAImageFloat is None:\n if dtype == numpy.uint8:\n NPAImageInt = self.npaImage\n elif dtype == numpy.uint16:\n NPAImageInt = (self.npaImage / 256).astype(numpy.uint8)\n else: #for float or a signed integer\n vmin = self.npaImage.min()\n vmax = self.npaImage.max()\n NPAImageInt = ((self.npaImage.astype(numpy.float32) - vmin) / (vmax - vmin) * 255.0).astype(numpy.uint8)\n else:\n vmin = NPAImageFloat.min()\n vmax = NPAImageFloat.max()\n EDVerbose.DEBUG(\"EDPluginExecThumbnailv10: NPAImageFloat => NPAImageInt min=%s max =%s\" % (vmin, vmax))\n NPAImageInt = ((NPAImageFloat - vmin) * 255.0 / (vmax - vmin)).astype(numpy.uint8)\n#COnversion back to PIL mode\n if isRGB is True:\n pilOutputImage = Image.fromarray(NPAImageInt, 'RGB')\n else:\n pilOutputImage = Image.fromarray(NPAImageInt, 'L')\n\n if (self.autocontrast is not None):\n pilOutputImage = ImageOps.autocontrast(pilOutputImage, self.autocontrast)\n\n if (self.width is not None) or (self.height is not None):\n if (self.width > 0) and (self.height > 0):\n if self.keepRatio is True:\n# PIL takes care of the ratio\n pilOutputImage.thumbnail((self.width, self.height), Image.ANTIALIAS)\n else:\n pilOutputImage = pilOutputImage.resize((self.width, self.height), Image.ANTIALIAS)\n else:\n if self.width is None:\n pilOutputImage.thumbnail((self.height, self.height), Image.ANTIALIAS)\n elif self.height is None:\n pilOutputImage.thumbnail((self.width, self.width), Image.ANTIALIAS)\n\n if self.invert == True:\n pilOutputImage = ImageOps.invert(pilOutputImage)\n if self.colorize == True:\n pilOutputImage.putpalette(EDPluginExecThumbnailv10.getPalette())\n pilOutputImage = pilOutputImage.convert(\"RGB\")\n\n self.synchronizeOn()\n if self.format == \"jpg\":\n self.width, self.height = pilOutputImage.size\n if self.width * self.height > ImageFile.MAXBLOCK:\n ImageFile.MAXBLOCK = self.width * self.height\n try:\n pilOutputImage.save(self.output, \"JPEG\", quality=85, 
optimize=True)\n except TypeError:\n pilOutputImage.save(self.output)\n else:\n pilOutputImage.save(self.output)\n self.synchronizeOff()", "def process_image_file(self, image_file):\n image = image_util.load_image_from_file(image_file)\n return self.process_image(image)", "def _process_images(\n raw_image_paths: pd.Series,\n raw_images_dir: str,\n ROI_definitions: Dict[str, Tuple],\n flat_field_filepath_or_none: Union[str, None],\n save_ROIs: bool,\n save_dark_frame_corrected_images: bool,\n save_flat_field_corrected_images: bool,\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n\n def _process_image_local(raw_image_path):\n \"\"\" Version of process_image with all of the local configuration variables packed in.\n Also encapsulates the opening of the image.\n \"\"\"\n return process_image(\n original_rgb_image=raw.open.as_rgb(raw_image_path),\n original_image_filepath=raw_image_path,\n raw_images_dir=raw_images_dir,\n ROI_definitions=ROI_definitions,\n flat_field_filepath_or_none=flat_field_filepath_or_none,\n save_ROIs=save_ROIs,\n save_dark_frame_corrected_image=save_dark_frame_corrected_images,\n save_flat_field_corrected_image=save_flat_field_corrected_images,\n )\n\n with ThreadPoolExecutor() as executor:\n # We want identical warnings to be shown only for the first image they occur on (the default),\n # but we also want subsequent calls to process_experiment to start with a fresh warning store\n # so that warnings don't stop showing after the first run.\n # catch_warnings gives us this fresh warning store.\n with warnings.catch_warnings():\n # process_image returns roi_summary_data df, image_diagnostics df -> this will be a list of 2-tuples\n roi_summary_data_and_image_diagnostics_dfs_for_files = list(\n tqdm(\n executor.map(_process_image_local, raw_image_paths),\n total=len(raw_image_paths),\n )\n )\n roi_summary_data_for_files, image_diagnostics_for_files = zip(\n *roi_summary_data_and_image_diagnostics_dfs_for_files\n )\n\n roi_summary_data_for_all_files = _stack_dataframes(roi_summary_data_for_files)\n image_diagnostics_for_all_files = _stack_serieses(image_diagnostics_for_files)\n\n return roi_summary_data_for_all_files, image_diagnostics_for_all_files", "def process_image(image):\n image = resize(image)\n return image", "def process_image(self, encoded_image, thread_id=0):\n return image_processing.process_image(encoded_image,\n is_training=self.is_training(),\n height=self.config.image_height,\n width=self.config.image_width,\n thread_id=thread_id,\n image_format=self.config.image_format)", "def RemoteBuild(self, image):\n raise NotImplementedError()", "def process_image(image):\n # Open the image using PIL\n pil_image = Image.open(image)\n \n # Resize the image to 256x256 while maintining aspect ratio\n if pil_image.width > pil_image.height:\n resize_dim = (int(pil_image.width*256 / pil_image.height), 256)\n else:\n resize_dim = (256, int(pil_image.height*256 / pil_image.width))\n \n pil_image = pil_image.resize(resize_dim)\n \n # Crop image to center 224 pixles\n crop_box_dim = 224\n left = (pil_image.width - crop_box_dim)/2\n top = (pil_image.height - crop_box_dim)/2\n right = pil_image.width - (pil_image.width - crop_box_dim)/2\n bottom = pil_image.height - (pil_image.height - crop_box_dim)/2\n pil_image = pil_image.crop((left, top, right, bottom))\n \n # Update color channels\n np_image = np.array(pil_image)\n np_image_means = np.array([0.485, 0.456, 0.406])\n np_image_stddev = np.array([0.229, 0.224, 0.225])\n np_image = (np_image/255 - np_image_means) / np_image_stddev\n \n # 
PIL images and numpy arrays have color channels in the 3rd dimension\n # Transpose them to first dimension to match what PyTorch expects\n np_image = np_image.transpose((2,0,1))\n\n return np_image", "def get_preprocess_fn(**preprocessing_kwargs):\n\n def _preprocess_fn(data):\n \"\"\"The preprocessing function that is returned.\"\"\"\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data\n\n return _preprocess_fn", "def process(self):\n # Opening and preprocessing of the input file\n if self.options.mbtiles_fromdisk or self.options.mbtiles_todisk:\n if self.options.mbtiles_fromdisk:\n i_parm=10\n if self.options.mbtiles_todisk:\n i_parm=11\n if self.options.verbose:\n print \"GDAL2MbTiles :mbtiles from/to disk [\",i_parm,\"] mbtiles_fromdisk[\",self.options.mbtiles_fromdisk,\"] mbtiles_todisk[\",self.options.mbtiles_todisk,\"]\"\n self.mbtiles_setup(i_parm)\n return\n else:\n if self.options.verbose:\n print \"GDAL2MbTiles :tile creation mbtiles[\",self.options.mbtiles,\"]\"\n self.open_input()\n # Generation of main metadata files and HTML viewers\n self.generate_metadata()\n # Generation of the lowest tiles\n self.generate_base_tiles()\n # Generation of the overview tiles (higher in the pyramid)\n self.generate_overview_tiles()\n # Generating of KML\n self.generate_kml()", "def process_image(self):\n\n detect.main(self.nn_args)", "def step_run(cls, config):\n\n nir_paw_image_fname = config.get(cls.step_name, 'nir_paw_image')\n nir_paw_conf_fname = config.get(cls.step_name, 'nir_paw_conf')\n output_template = config.get(cls.step_name, 'output_template')\n conf_limit = config.getfloat(cls.step_name, 'conf_limit')\n\n# logger.info('reading Comparison image from %s', comp_fname)\n# comp_im = DESImage.load(comp_fname)\n\n ret_code = cls.__call__(nir_paw_image_fname, nir_paw_conf_fname, output_template, conf_limit)\n return ret_code", "def proc_image(self, tokens):\n\n print \"IMAGE:\", tokens, tokens.asList(), tokens.keys()\n\n raise NotImplementedError", "def processing(request):\n image = get_processing_image(str(settings.BASE_DIR) + request.session['image'])\n image_name = str(request.session['image']).split('/')[-1]\n db_object = ImageProc.objects.get(id=request.session['id'])\n\n # create no illumination image\n removed_illumination = remove_uneven_illumination(image)\n removed_illumination_pil = get_pil_image(removed_illumination)\n db_object.image_illumination_removed.save('no_ilumination ' + image_name,\n ContentFile(removed_illumination_pil), save=False)\n\n # create contours\n image_contours = create_contours_image(image.copy())\n image_contours_pil = get_pil_image(image_contours)\n db_object.image_contours.save('contours ' + image_name, ContentFile(image_contours_pil), save=False)\n\n # create axes\n image_axes = create_axes_image(image.copy())\n image_axes_pil = get_pil_image(image_axes)\n db_object.image_axes.save('axes ' + image_name, ContentFile(image_axes_pil), save=False)\n\n # create CLAHE\n image_clahe = clahe_image(image.copy())\n image_clahe_pil = get_pil_image(image_clahe)\n db_object.image_clahe.save('clahe ' + image_name, ContentFile(image_clahe_pil), save=False)\n\n colour_features = 
colour_quantification(image)\n\n db_object.white_color = colour_features['WHITE']\n db_object.red_color = colour_features['RED']\n db_object.light_brown_color = colour_features['LIGHT_BROWN']\n db_object.dark_brown_color = colour_features['DARK_BROWN']\n db_object.blue_gray_color = colour_features['BLUE_GRAY']\n db_object.black_color = colour_features['BLACK']\n\n asymmetry_features = asymmetry_quantification(image, enable_processing_features=True)\n\n db_object.a_p_feature = asymmetry_features['a_p']\n db_object.b_p_feature = asymmetry_features['b_p']\n db_object.a_b_feature = asymmetry_features['a_b']\n db_object.b_b_feature = asymmetry_features['b_b']\n db_object.area_p_feature = asymmetry_features['A_p']\n db_object.area_c_feature = asymmetry_features['A_c']\n db_object.solidity_feature = asymmetry_features['solidity']\n db_object.extent_feature = asymmetry_features['extent']\n db_object.equivalent_diameter_feature = asymmetry_features['equivalent diameter']\n db_object.circularity_feature = asymmetry_features['circularity']\n db_object.p_p_feature = asymmetry_features['p_p']\n db_object.b_p_a_p_feature = asymmetry_features['b_p/a_p']\n db_object.b_b_a_b_feature = asymmetry_features['b_b/a_b']\n db_object.entropy_feature = asymmetry_features['entropy']\n\n db_object.save()\n\n request.user.profile.processed_images += 1\n request.user.save()\n\n return redirect('processing_app:results', request.session['id'])", "def _prepare(self):\n\n # table of containing directories (to add implicit dependencies to image builds)\n directories: dict[PurePosixPath, set[FileValue]] = dict()\n\n # map plan elements to values\n v: dict[Any, ValueBase] = dict()\n # map plan elements to actions\n a: dict[Any, Action] = dict()\n\n # Create FileValues for WorkFiles found in plan\n for workfile in self.plan.files():\n value = FileValue(workfile)\n v[workfile] = value\n directory = workfile.posix_path.parent\n if directory not in directories:\n directories[directory] = set()\n directories[directory].add(value)\n\n for image in self.plan.images():\n if image.pull_from_registry:\n image_value = RegistryImageValue(image)\n pull_image_action = PullImageAction(image)\n pull_image_action.set_output_image(image_value)\n v[image] = image_value\n a[image] = pull_image_action\n else:\n image_value = ImageToBuildValue(image)\n build_image_action = BuildImageAction(image)\n build_image_action.set_output_image(image_value)\n v[image] = image_value\n a[image] = build_image_action\n # if context dir contains any WorkFiles, add corresponding FileValues as dependencies\n for directory in directories.keys():\n if directory.is_relative_to(image_value._plan_element.build_from_context):\n for file_value in directories[directory]:\n logging.info(\"Implied dependency %s->%s\", file_value, build_image_action)\n build_image_action.add_input(file_value)\n\n for e in self.plan.execs():\n image_value = v[e.image]\n if not isinstance(image_value, ImageValue):\n raise Exception(\"not an ImageValue %s\" % image_value)\n exec_action = ExecAction(e, image_value)\n a[e] = exec_action\n for inp in e.inputs:\n exec_action.add_input(v[inp.workfile])\n v[inp.workfile].add_consumer(exec_action)\n for output in e.outputs:\n exec_action.add_output(v[output.workfile])\n v[output.workfile].set_producer(exec_action)\n\n self.actions = set(a.values())\n self.values = set(v.values())", "def _Build(self, image):\n image = _ContainerImage(image)\n build_start = time.time()\n if not FLAGS.local_container_build:\n try:\n # Build the image remotely using an 
image building service.\n self.RemoteBuild(image)\n self.remote_build_times[image.name] = time.time() - build_start\n return\n except NotImplementedError:\n pass\n\n self.PrePush(image)\n # Build the image locally using docker.\n build_start = time.time()\n self.LocalBuildAndPush(image)\n self.local_build_times[image.name] = time.time() - build_start", "def run(self):\n result = Image.run(self)\n reference = directives.uri(self.arguments[0])\n self.options[\"uri\"] = data_uri_from_file(reference)\n return result", "def build(self):\n self._remove_swarm_keys()\n self._remove_pod_keys()\n self._set_image()\n self._translate_docker_properties()", "def _make_process_op(self):\n\n with tf.variable_scope(\"state_preprocess\"):\n self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8)\n output = tf.image.rgb_to_grayscale(self.input_state)\n output = tf.image.crop_to_bounding_box(output, 34, 0, 160, 160)\n output = tf.image.resize_images(output, [84, 84], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n output = tf.to_float(output) / 255.0\n output = tf.transpose(output, perm=[2, 1, 0])\n\n return output", "def compose_image_meta(self, image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n\n meta = np.array([image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=class_num\n )\n return meta\n pass", "def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. 
Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")", "def init():\n \n # General parameters\n exp_path = '/home/laura/Documents/stacks tif/1705_regMovie.tif' # experimental tif stack (grayscale)\n bin_path = '/home/laura/Documents/stacks tif/1705/1705_binarizedMovie.tif' # binarized tif stack\n vect_path = '/home/laura/Documents/STAGE3/1705_NET/' # gpickle directory\n dest_path = '/home/laura/Documents/STAGE3/1705_NET/superposition' # output directory\n verbose = True\n debug = True\n invert = True \n main_params = [exp_path, bin_path, vect_path, dest_path, verbose, debug, invert]\n \n # Output options\n doImg = -1 # image index\n doStack = False \n doVideo = False \n compress = 3 # advice: no more than 5\n output_params = [doImg, doStack, doVideo, compress]\n \n # Drawing options (colors as BGR)\n line = True # edges drawing\n line_color = (0, 255, 0) # green \n line_size = 1 \n apex_color = (0, 0, 255) # red\n apex_size = 5\n node_color = (255, 0, 0) # blue\n node_size = 5\n body_color = (0, 255, 0) # green\n body_size = 3\n drawing_params = [line, line_color, line_size, apex_color, apex_size,\n node_color, node_size, body_color, body_size]\n \n return main_params, output_params, drawing_params", "def _determine_kernel_images(self, proxy_config):\n if proxy_config.get('image_name'):\n self.kernel_image = proxy_config.get('image_name')\n self.kernel_image = os.environ.get('KERNEL_IMAGE', self.kernel_image) # support BYO Image\n\n self.kernel_executor_image = self.kernel_image # Default the executor image to current image\n if proxy_config.get('executor_image_name'):\n self.kernel_executor_image = proxy_config.get('executor_image_name')\n self.kernel_executor_image = os.environ.get('KERNEL_EXECUTOR_IMAGE', 
self.kernel_executor_image)", "def process():\n config = read_config()\n \n\n img_dir = config['DEFAULT']['images_directory']\n results_dict = {}\n images = list(get_image_files(img_dir))\n for image in tqdm.tqdm(images):\n info = hash_file(image)\n if info == 0:\n continue\n\n hash_value = info['hash']\n\n if hash_value not in results_dict:\n file_name = os.path.basename(info['_id'])\n results_dict[hash_value] = [file_name, 1]\n else:\n results_dict[hash_value][1] += 1\n\n count = list(results_dict.values())\n sorted_count = sorted(count, key=lambda x: x[1], reverse=True)\n \n with ImagesDB(IMG_INFO_DB_FILENAME) as imgDb: \n imgDb.insert_batch(sorted_count)", "def propagateImage(self, dryrun):\n pass", "def compose_image_meta(image_id, image_shape, window, active_class_ids):\n meta = np.array(\n [image_id] + # size=1\n list(image_shape) + # size=3\n list(window) + # size=4 (x1, y1, x2, y2) in image cooredinates\n list(active_class_ids) # size=num_classes\n )\n return meta", "def run(self):\n result = Image.run(self)\n reference = directives.uri(self.arguments[0])\n self.options['uri'] = data_uri(reference)\n return result", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def request_file(flags, image_data):\n\n with Image.open(io.BytesIO(image_data)) as img:\n proc_img = process_image_file(flags, img)\n\n return proc_img", "def prepare_image_for_deploy(runtime: \"mlrun.runtimes.BaseRuntime\"):\n pass", "def _load_preprocess_image(self, image_file):\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image", "def mode_input(self, images_info_list):\n\n molded_images_list = []\n image_metas_list = []\n windows_list = []\n\n image_mi_dim = cfg.COMMON.IMAGE_MIN_DIM\n image_max_dim = cfg.COMMON.IMAGE_MAX_DIM\n image_min_scale = cfg.COMMON.IMAGE_MIN_SCALE\n image_resize_mode = cfg.COMMON.IMAGE_RESIZE_MODE\n\n for image_info in images_info_list:\n # resize image\n molded_image, window, scale, padding, crop = self.resize_image(image_info,\n min_dim=image_mi_dim,\n min_scale=image_min_scale,\n max_dim=image_max_dim,\n resize_mode=image_resize_mode)\n\n molded_image = self.mold_image(molded_image, self.mean_pixel)\n\n # Build image_meta\n image_meta = self.compose_image_meta(0, image_info.shape, molded_image.shape, window, scale,\n np.zeros([cfg.COMMON.CLASS_NUM], dtype=np.int32))\n # Append\n molded_images_list.append(molded_image)\n image_metas_list.append(image_meta)\n windows_list.append(window)\n pass\n\n # Pack into arrays\n molded_images_list = np.stack(molded_images_list)\n image_metas_list = np.stack(image_metas_list)\n windows_list = np.stack(windows_list)\n return molded_images_list, image_metas_list, windows_list\n pass", "def build_impl(**kwargs: Any) -> None:\n try:\n config = configuration.create_transient_build_config(kwargs)\n except configuration.CLIArgumentError as e:\n print(e, file=sys.stderr)\n sys.exit(1)\n store = image.ImageStore(backend_dir=config.image_backend, frontend_dir=None)\n builder = build.ImageBuilder(config, store)\n builder.build()\n sys.exit(0)", "def process_image(self, image_path):\n\n img = load_img(image_path, target_size=IMAGE_SIZE)\n img_array = img_to_array(img)\n # Create a batch by increase dimensions\n img_array = expand_dims(img_array, 0)\n print(img_array.shape)\n return img_array", "def step_run(cls, image, config):\n\n min_cols = config.getint(cls.step_name, 'min_cols')\n max_cols = config.getint(cls.step_name, 'max_cols')\n interp_mask = 
maskbits.parse_badpix_mask(config.get(cls.step_name, 'interp_mask'))\n invalid_mask = maskbits.parse_badpix_mask(config.get(cls.step_name, 'invalid_mask'))\n add_noise = config.getboolean(cls.step_name, 'add_noise')\n clobber = config.getboolean(cls.step_name, 'clobber')\n block_size = config.getint(cls.step_name, 'block_size')\n\n kwargs = locals()\n\n logger.info(\"Will run row_zipper function with:\")\n for key in kwargs.keys():\n logger.info(\"--%s %s\", key, kwargs[key])\n\n # Now we call the function\n image.data, image.mask = cls.__call__(image.data, image.mask,\n interp_mask=interp_mask,\n min_cols=min_cols,\n max_cols=max_cols,\n invalid_mask=invalid_mask,\n add_noise=add_noise,\n block_size=block_size,\n clobber=clobber)", "def _load_preprocess_image(self, image_file):\n\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image", "def _state_main(self, gui):\n gui.entry.wait_variable(gui.entry_sv)\n\n '''Clean string'''\n files = literal_eval(gui.entry_sv.get())\n\n '''Remove previous images'''\n if hasattr(gui, \"panel\"):\n gui.panel.destroy()\n\n '''Load each image'''\n for file_name in files:\n file_name = file_name.replace(\"{\", \"\").replace(\"}\", \"\")\n # image = tk.PhotoImage(file=file_name)\n if \".CR2\" in file_name:\n '''Rawpy implementation'''\n file_image = rawpy.imread(file_name)\n file_image = file_image.postprocess()\n '''Rawkit implementation'''\n '''file_image = Raw(file_name)\n file_image = np.array(file_image.to_buffer())'''\n '''OpenCV implementation'''\n '''file_image = cv2.imread(file_name)'''\n else:\n file_image = Image.open(file_name)\n '''image = file_image.resize((500, 500), Image.ANTIALIAS)\n image = ImageTk.PhotoImage(image)\n gui.panel = tk.Label(gui.root, image=image)\n gui.panel.image = image\n gui.panel.pack()'''\n # panel.grid(row=2)\n\n image_data = np.array(file_image)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n '''print(image_data.shape)\n print(image_data)\n print(len(image_data))\n print(len(image_data[0]))'''\n returned_image = Image.fromarray(image_data)\n '''cv2.imshow(\"Gray\", image_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Gray\")'''\n\n '''enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(file_image))\n enhanced_image = enhanced_contrast.enhance(255)\n enhanced_data = np.array(enhanced_image)\n plot_functions.imshow(enhanced_image)\n plot_functions.show()'''\n\n # color_space = cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV)\n # print(color_space)\n \n '''Create mask for white-ish pixels'''\n '''lower_background = np.array([150, 150, 150])\n upper_background = np.array([255, 255, 255])\n print(image_data)\n white_mask = cv2.inRange(image_data, lower_background, upper_background)\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n white_mask = white_mask / 255'''\n\n '''Create mask for black-ish pixels'''\n '''lower_background = np.array([0, 0, 0])\n upper_background = np.array([25, 25, 25])\n black_mask = cv2.inRange(image_data, lower_background, upper_background)\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n black_mask = black_mask / 255'''\n\n '''Add masks together'''\n '''background_mask = white_mask\n # Ensure no value is above 1\n background_mask = np.clip(background_mask, 0, 1)'''\n \n copied_image_data 
= np.asarray(returned_image).copy()\n # background_mask = np.logical_not(background_mask)\n '''for row_index, [mask_row, image_row] in enumerate(zip(background_mask, copied_image_data)):\n # place black pixel on corresponding masked pixels\n # copied_image_data[row_index] = np.array([image_row[pixel] * int(mask_row[pixel]) for pixel in range(len(mask_row))])\n # make pixel fully white on corresponding masked pixels\n copied_image_data[row_index] = np.array([np.array([255, 255, 255]) if int(mask_row[pixel]) else image_row[pixel] for pixel in range(len(mask_row))])'''\n\n '''Turn removed pixels red'''\n '''mask_image = Image.fromarray(copied_image_data)\n plot_functions.imshow(mask_image)\n plot_functions.show()'''\n trapezoid_data = copied_image_data.copy()\n\n enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(trapezoid_data))\n enhanced_image = enhanced_contrast.enhance(255)\n trapezoid_data = np.array(enhanced_image)\n\n '''Detect lines'''\n edges = cv2.Canny(trapezoid_data, 75, 150)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, maxLineGap=1000)\n # print(lines)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if y1 == y2:\n cv2.line(copied_image_data, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n '''Trapezoid attempt'''\n\n # filters image bilaterally and displays it\n bilatImg = cv2.bilateralFilter(trapezoid_data, 5, 175, 175)\n\n # finds edges of bilaterally filtered image and displays it\n edgeImg = cv2.Canny(bilatImg, 75, 200)\n\n # gets contours (outlines) for shapes and sorts from largest area to smallest area\n contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n # drawing red contours on the image\n for con in contours:\n cv2.drawContours(trapezoid_data, con, -1, (255, 255, 255), 3)\n\n '''Detect corners'''\n dst = cv2.cornerHarris(edges, 30, 31, 0.001)\n dst = cv2.dilate(dst, None)\n ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,\n 0.001)\n corners = cv2.cornerSubPix(edges, np.float32(centroids), (5, 5),\n (-1, -1), criteria)\n\n good_corners = []\n for corner in corners:\n if (corner[1] < 1000) & (corner[1] > 650) & (corner[0] > 250) & (corner[0] < 2250):\n good_corners.append(corner)\n cv2.circle(edges, (corner[0], corner[1]), 10, (255, 255, 255))\n\n print(good_corners)\n if len(good_corners) >= 3:\n corner_combos = itertools.combinations(good_corners, 3)\n elif len(good_corners) > 1:\n corner_combos = itertools.combinations(good_corners, 2)\n\n best_corner_combo = None\n best_coef = np.inf\n for corner_combo in corner_combos:\n regression = LinearRegression().fit(np.array([corner[0] for corner in corner_combo]).reshape(-1, 1),\n np.array([corner[1] for corner in corner_combo]))\n if np.abs(regression.coef_) < best_coef:\n best_coef = np.abs(regression.coef_)\n best_corner_combo = np.array([corner[1] for corner in corner_combo])\n\n y_edge = int(round(np.mean(best_corner_combo)))\n edges = edges[y_edge:3000, 200:2200]\n copied_image_data = copied_image_data[y_edge:2500, 200:2200]\n trapezoid_data = trapezoid_data[y_edge:2500, 200:2200]\n\n # and double-checking the outcome\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.imshow(\"Contours check\", trapezoid_data)\n 
cv2.waitKey()\n cv2.destroyWindow(\"Contours check\")\n\n # find the perimeter of the first closed contour\n perim = cv2.arcLength(contours[0], True)\n # setting the precision\n epsilon = 0.02 * perim\n # approximating the contour with a polygon\n approxCorners = cv2.approxPolyDP(contours[0], epsilon, True)\n # check how many vertices has the approximate polygon\n approxCornersNumber = len(approxCorners)\n\n for corners in approxCorners:\n cv2.circle(trapezoid_data, (corners[0], corners[1]), radius=10, color=(255, 255, 255), thickness=-1)\n cv2.imshow(\"Vertex position\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Vertex position\")\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def preprocess_image(self, inputs):\n raise NotImplementedError('preprocess_image method not implemented.')", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def __init__(self,filename) :\n # create an MImage object\n self.image=om.MImage()\n # read from file MImage should handle errors for us so no need to check\n self.image.readFromFile(filename)\n # as the MImage class is a wrapper to the C++ module we need to access data\n # as pointers, to do this use the MScritUtil helpers\n self.scriptUtilWidth = om.MScriptUtil()\n self.scriptUtilHeight = om.MScriptUtil()\n\n # first we create a pointer to an unsigned in for width and height\n widthPtr = self.scriptUtilWidth.asUintPtr()\n heightPtr = self.scriptUtilHeight.asUintPtr()\n # now we set the values to 0 for each\n self.scriptUtilWidth.setUint( widthPtr, 0 )\n self.scriptUtilHeight.setUint( heightPtr, 0 )\n # now we call the MImage getSize method which needs the params passed as pointers\n #as it uses a pass by reference\n self.image.getSize( widthPtr, heightPtr )\n # once we get these values we need to convert them to int so use the helpers\n self.m_width = self.scriptUtilWidth.getUint(widthPtr)\n self.m_height = self.scriptUtilHeight.getUint(heightPtr)\n\n # now we grab the pixel data and store\n self.charPixelPtr = self.image.pixels()\n # query to see if it's an RGB or RGBA image, this will be True or False\n self.m_hasAlpha=self.image.isRGBA()\n # if we are doing RGB we step into the image array in 3's\n # data is always packed as RGBA even if no alpha present\n self.imgStep=4\n # finally create an empty script util and a pointer to the function\n # getUcharArrayItem function for speed\n scriptUtil = om.MScriptUtil()\n self.getUcharArrayItem=scriptUtil.getUcharArrayItem\n\n self.scriptUtilWidth = om.MScriptUtil()\n self.scriptUtilHeight = om.MScriptUtil()\n\n # first we create a pointer to an unsigned in for width and height\n widthPtr = self.scriptUtilWidth.asUintPtr()\n heightPtr = 
self.scriptUtilHeight.asUintPtr()", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def __init__(self, env, height=80, width=80, grayscale=True,\n crop=lambda img: img):\n super(PreprocessImage, self).__init__(env)\n self.img_size = (height, width)\n self.grayscale = grayscale\n self.crop = crop\n\n n_colors = 1 if self.grayscale else 3\n self.observation_space = Box(0.0, 1.0, [n_colors, height, width])", "def prepare_image(self, image_info, configdrive=None):\n LOG.debug('Preparing image %s', image_info['id'])\n # NOTE(dtantsur): backward compatibility\n if configdrive is None:\n configdrive = image_info.pop('configdrive', None)\n device = hardware.dispatch_to_managers('get_os_install_device',\n permit_refresh=True)\n\n disk_format = image_info.get('disk_format')\n stream_raw_images = image_info.get('stream_raw_images', False)\n # don't write image again if already cached\n if self.cached_image_id != image_info['id']:\n if self.cached_image_id is not None:\n LOG.debug('Already had %s cached, overwriting',\n self.cached_image_id)\n\n if stream_raw_images and disk_format == 'raw':\n if image_info.get('image_type') == 'partition':\n self.partition_uuids = _write_partition_image(None,\n image_info,\n device,\n configdrive)\n stream_to = self.partition_uuids['partitions']['root']\n else:\n self.partition_uuids = {}\n stream_to = device\n\n self._stream_raw_image_onto_device(image_info, stream_to)\n else:\n self._cache_and_write_image(image_info, device, configdrive)\n\n _validate_partitioning(device)\n\n # For partition images the configdrive creation is taken care by\n # partition_utils.work_on_disk(), invoked from either\n # _write_partition_image or _cache_and_write_image above.\n # Handle whole disk images explicitly now.\n if image_info.get('image_type') != 'partition':\n if configdrive is not None:\n # Will use dummy value of 'local' for 'node_uuid',\n # if it is not available. 
This is to handle scenario\n # wherein new IPA is being used with older version\n # of Ironic that did not pass 'node_uuid' in 'image_info'\n node_uuid = image_info.get('node_uuid', 'local')\n partition_utils.create_config_drive_partition(node_uuid,\n device,\n configdrive)\n\n self._fix_up_partition_uuids(image_info, device)\n msg = 'image ({}) written to device {} '\n result_msg = _message_format(msg, image_info, device,\n self.partition_uuids)\n LOG.info(result_msg)\n return result_msg", "def process_input(args, phil_args, paramfile=None, gui=False, write_files=False):\n\n working_phil, bad_args = get_input_phil(\n phil_args=phil_args, ha14=args.ha14, paramfile=paramfile, gui=gui\n )\n\n # Perform command line check and modify params accordingly\n params = working_phil.extract()\n if args.ha14:\n params.advanced.processing_backend = \"ha14\"\n\n if not params.description:\n from iota import now\n\n params.description = \"IOTA parameters auto-generated on {}\".format(now)\n\n if not params.output:\n params.output = os.path.abspath(os.curdir)\n\n # Check for -r option and set random subset parameter\n if args.random > 0:\n params.data_selection.random_sample.flag_on = True\n params.data_selection.random_sample.number = args.random[0]\n\n if args.range:\n params.data_selection.image_range.flag_on = True\n params.data_selection.image_range.range = args.range\n\n # Set temporary folder path\n if args.tmp is not None:\n params.advanced.temporary_output_folder = args.tmp[0]\n\n # Check for -n option and set number of processors override\n # (for parallel map only, for now)\n from multiprocessing import cpu_count\n\n max_proc = int(cpu_count() * 3 / 4)\n nproc = args.nproc[0] if isinstance(args.nproc, list) else args.nproc\n if nproc != 0:\n if nproc >= max_proc:\n params.mp.n_processors = max_proc\n else:\n params.mp.n_processors = nproc\n elif params.mp.method == \"multiprocessing\":\n if params.mp.n_processors >= max_proc or params.mp.n_processors == 0:\n params.mp.n_processors = int(max_proc / 2)\n\n working_phil = working_phil.format(python_object=params)\n\n if write_files:\n write_defaults(\n os.path.abspath(os.curdir),\n working_phil.as_str(),\n params.advanced.processing_backend,\n )\n\n return working_phil, bad_args", "def process_image(self):\n if not os.path.isfile(self.output_file) or self.gallery.generator.settings[\"GALLERY_REGENERATE_EXISTING\"]:\n \n # Actions should be processed in order of appearance in actions array\n for i in range(len(self.preset[\"actions\"])):\n a = self.preset[\"actions\"][i]\n\n if a[\"type\"] == \"fit\":\n if not \"from\" in a:\n a[\"from\"] = (0.5, 0.5) # crop from middle by default\n\n self.image = ImageOps.fit(self.image, (a[\"width\"], a[\"height\"],), method=Image.ANTIALIAS, centering=a[\"from\"])\n \n if a[\"type\"] == \"greyscale\":\n self.image = ImageOps.grayscale(self.image)\n\n if a[\"type\"] == \"resize\":\n self.image.thumbnail((a[\"width\"], a[\"height\"]), Image.NEAREST)\n \n # TODO: Write other useful transforms here!\n \n\n self.image.save(self.output_file, \"JPEG\")", "def main(params):\n mpi_vs_multiprocess_logging(\"process\", params)\n\n ifg_paths = []\n for ifg_path in params[cf.INTERFEROGRAM_FILES]:\n ifg_paths.append(ifg_path.sampled_path)\n\n rows, cols = params[\"rows\"], params[\"cols\"]\n\n return process_ifgs(ifg_paths, params, rows, cols)", "def run_image_editor( self ):\n\n # XXX: hardcoded program name and image size.\n subprocess.Popen( [\"gimp\", \"-adfs\", self.record[\"filename\"]] )", "def preprocessor(image_path, 
binary=True):\n img = cv2.imread (image_path)\n shape = cv2.resize (img, (int (200), int (64))).shape[:2]\n\n # Binary\n if binary:\n brightness = 0\n contrast = 50\n img = np.int16 (img)\n img = img * (contrast / 127 + 1) - contrast + brightness\n img = np.clip (img, 0, 255)\n img = np.uint8 (img)\n\n img = cv2.cvtColor (img, cv2.COLOR_BGR2GRAY)\n T = threshold_local (img, 11, offset=10, method=\"gaussian\")\n img = (img > T).astype (\"uint8\") * 255\n\n # Increase line width\n kernel = np.ones ((3, 3), np.uint8)\n img = cv2.erode (img, kernel, iterations=1)\n else:\n img = cv2.cvtColor (img, cv2.COLOR_BGR2GRAY)\n\n return img", "def build_image(self, df_path):\n cmd = \"{} build -f {} -t {} .\".format(\n self.binary, df_path, self.vars['image'])\n LOG.info(\"Running: {}\".format(cmd))\n res = subprocess.run(cmd, shell=True)\n if res.returncode != 0:\n sys.exit(2)\n return res", "def __init__(self):\n self.index = 'r11_07_06c'\n self.parameters = {'run_index': 'r11_07_06c',\n 'h_1': 0.25,\n 'rho_0': 1.150,\n 'rho_1': 1.100,\n 'rho_2': 1.000,\n 'alpha': 0.5,\n 'D': 0.4,\n 'H': 0.25,\n 'sample': 1.0,\n 'perspective': 'old'}\n self.run_data = {'run_index': 'r11_07_06c',\n 'l0x': 2796,\n 'l0y': 1151,\n 'lsx': 2793,\n 'lsy': 716,\n 'j10x': 210,\n 'j10y': 1165,\n 'j1sx': 208,\n 'j1sy': 727,\n 'leakage': -76,\n 'odd_1': 'n',\n 'j20x': 2728,\n 'j20y': 1086,\n 'j2sx': 2730,\n 'j2sy': 670,\n 'r0x': 1097,\n 'r0y': 1095,\n 'rsx': 1093,\n 'rsy': 683,\n 'odd_2': 'n'}\n self.raw_image = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n self.bc_image = 'tests/data/bc/r11_07_06c/cam1/img_0001.jpg'\n self.processed_path = 'tests/data/processed_ref/r11_07_06c/cam1/img_0001.jpg'", "def preprocess_graph(self):\n image = tf.placeholder(\n tf.float32,\n shape=[self.img_h, self.img_w, self.col_channels])\n patches = self.create_patches(image)\n return {'image': image,\n 'patches': patches}", "def image(self):\n # TODO: make sure this method works for png, gif, tiff\n if self.has_metadata:\n self.extract_metadata()\n tempdir_path = self.make_tempdir()\n tempfile_path = os.path.join(tempdir_path, self.filename)\n warnings.simplefilter('error', Image.DecompressionBombWarning)\n try: # Do image conversions\n img_in = Image.open(self.src_path)\n img_out = Image.frombytes(img_in.mode, img_in.size, img_in.tobytes())\n img_out.save(tempfile_path)\n self.src_path = tempfile_path\n except Exception as e: # Catch decompression bombs\n # TODO: change this from all Exceptions to specific DecompressionBombWarning\n self.add_error(e, \"Caught exception (possible decompression bomb?) 
while translating file {}.\".format(self.src_path))\n self.make_dangerous()\n self.add_file_string('Image file')\n self.set_property('processing_type', 'image')", "async def _process_image(self, image: Image) -> Optional[Image]:\n\n # not enabled?\n if not self._enabled:\n return None\n\n # we only accept OBJECT images\n if image.header[\"IMAGETYP\"] != \"object\":\n return None\n\n # reference header?\n if self._ref_header is None:\n log.info(\"Setting new reference image...\")\n await self._reset_guiding(image=image)\n return None\n\n # check RA/Dec in header and separation\n c1 = SkyCoord(ra=image.header[\"TEL-RA\"] * u.deg, dec=image.header[\"TEL-DEC\"] * u.deg, frame=\"icrs\")\n c2 = SkyCoord(ra=self._ref_header[\"TEL-RA\"] * u.deg, dec=self._ref_header[\"TEL-DEC\"] * u.deg, frame=\"icrs\")\n separation = c1.separation(c2).deg\n if self._separation_reset is not None and separation * 3600.0 > self._separation_reset:\n log.warning(\n 'Nominal position of reference and new image differ by %.2f\", resetting reference...',\n separation * 3600.0,\n )\n await self._reset_guiding(image=image)\n return None\n\n # check filter\n if (\n self._reset_at_filter\n and \"FILTER\" in image.header\n and \"FILTER\" in self._ref_header\n and image.header[\"FILTER\"] != self._ref_header[\"FILTER\"]\n ):\n log.warning(\"The filter has been changed since the last exposure, resetting reference...\")\n await self._reset_guiding(image=image)\n return None\n\n # get time\n date_obs = Time(image.header[\"DATE-OBS\"])\n\n # check times and focus\n if self._last_header is not None:\n # check times\n t0 = Time(self._last_header[\"DATE-OBS\"])\n if (date_obs - t0).sec > self._max_interval:\n log.warning(\"Time between current and last image is too large, resetting reference...\")\n await self._reset_guiding(image=image)\n return None\n if (date_obs - t0).sec < self._min_interval:\n log.warning(\"Time between current and last image is too small, ignoring image...\")\n return None\n\n # check focus\n if (\n \"TEL-FOCU\" in image.header\n and self._reset_at_focus\n and abs(image.header[\"TEL-FOCU\"] - self._last_header[\"TEL-FOCU\"]) > 0.05\n ):\n log.warning(\"Focus difference between current and last image is too large, resetting reference...\")\n await self._reset_guiding(image=image)\n return None\n\n # exposure time too large?\n if self._max_exposure_time is not None and image.header[\"EXPTIME\"] > self._max_exposure_time:\n log.warning(\"Exposure time too large, skipping auto-guiding for now...\")\n self._loop_closed = False\n return None\n\n # remember header\n self._last_header = image.header\n\n # get offset\n image = await self.run_pipeline(image)\n\n # get telescope\n try:\n telescope = await self.proxy(self._telescope, ITelescope)\n except ValueError:\n log.error(\"Given telescope does not exist or is not of correct type.\")\n self._loop_closed = False\n return image\n\n # apply offsets\n try:\n if await self._apply(image, telescope, self.location):\n self._loop_closed = True\n log.info(\"Finished image.\")\n else:\n log.info(\"Could not apply offsets.\")\n self._loop_closed = False\n except ValueError as e:\n log.info(\"Could not apply offsets: %s\", e)\n self._loop_closed = False\n\n # return image, in case we added important data\n return image", "def init_func() -> JobInitStateReturn:\r\n log_to_file('Frame')\r\n\r\n if config_main.APPL_INPUT == config_main.IMAGE_INPUT:\r\n log_to_file('Image Name')\r\n\r\n log_to_file('Raw Pict Size')\r\n\r\n # noinspection PyUnresolvedReferences\r\n return 
JobInitStateReturn(True if global_var_handler.NR_PICTURES != 0 else False)", "def openAndPreProcessImage(path, copyOrig=False, preproc={}):\n try:\n im = Image.open(path).convert('L') #Open as a uint8 image\n except FileNotFoundError:\n print(f'Error: {path} not found')\n return\n except OSError:\n print(f'Error: Cannot open {path}, please check image formats supported by PIL.Image')\n return\n im = np.asarray(im)#[125:375,125:375] #Take a smaller region for speed\n \n # Also return an unprocessed copy of original image, if required\n im_orig = im.copy() if copyOrig else None\n \n return preProcessImage(im, **preproc), im_orig", "def _build_image_processing(self, shift_ratio=0):\n with tf.device(self.cpu_device):\n subset = 'train'\n image_producer_ops = []\n image_producer_stages = []\n images_splits, labels_splits = self.image_preprocessor.minibatch(\n self.dataset,\n subset=subset,\n use_datasets=self.params.use_datasets,\n cache_data=self.params.cache_data,\n shift_ratio=shift_ratio)\n images_shape = images_splits[0].get_shape()\n labels_shape = labels_splits[0].get_shape()\n for device_num in range(len(self.devices)):\n image_producer_stages.append(\n data_flow_ops.StagingArea(\n [images_splits[0].dtype, labels_splits[0].dtype],\n shapes=[images_shape, labels_shape]))\n return (image_producer_ops, image_producer_stages)", "def write_component_image_info_area(pldm_fw_up_pkg, metadata, image_files):\n components = metadata[\"ComponentImageInformationArea\"]\n # ComponentImageCount\n pldm_fw_up_pkg.write(struct.pack(\"<H\", len(components)))\n component_location_offsets = []\n # ComponentLocationOffset position in individual component image\n # information\n component_location_offset_pos = 12\n\n for component in components:\n # Record the location of the ComponentLocationOffset to be updated\n # after appending images to the firmware update package\n component_location_offsets.append(\n pldm_fw_up_pkg.tell() + component_location_offset_pos\n )\n\n # ComponentClassification\n component_classification = component[\"ComponentClassification\"]\n if component_classification < 0 or component_classification > 0xFFFF:\n sys.exit(\n \"ERROR: ComponentClassification should be [0x0000 - 0xFFFF]\"\n )\n\n # ComponentIdentifier\n component_identifier = component[\"ComponentIdentifier\"]\n if component_identifier < 0 or component_identifier > 0xFFFF:\n sys.exit(\"ERROR: ComponentIdentifier should be [0x0000 - 0xFFFF]\")\n\n # ComponentComparisonStamp\n component_comparison_stamp = get_component_comparison_stamp(component)\n\n # ComponentOptions\n component_options = bitarray(16, endian=\"little\")\n component_options.setall(0)\n supported_component_options = [0, 1, 2]\n for option in component[\"ComponentOptions\"]:\n if option not in supported_component_options:\n sys.exit(\n \"ERROR: unsupported ComponentOption in \"\n \" ComponentImageInformationArea section\"\n )\n component_options[option] = 1\n\n # RequestedComponentActivationMethod\n requested_component_activation_method = bitarray(16, endian=\"little\")\n requested_component_activation_method.setall(0)\n supported_requested_component_activation_method = [0, 1, 2, 3, 4, 5]\n for option in component[\"RequestedComponentActivationMethod\"]:\n if option not in supported_requested_component_activation_method:\n sys.exit(\n \"ERROR: unsupported RequestedComponent \"\n \" ActivationMethod entry\"\n )\n requested_component_activation_method[option] = 1\n\n # ComponentLocationOffset\n component_location_offset = 0\n # ComponentSize\n component_size = 
0\n # ComponentVersionStringType\n component_version_string_type = string_types[\"ASCII\"]\n # ComponentVersionStringlength\n # ComponentVersionString\n component_version_string = component[\"ComponentVersionString\"]\n check_string_length(component_version_string)\n\n format_string = \"<HHIHHIIBB\" + str(len(component_version_string)) + \"s\"\n pldm_fw_up_pkg.write(\n struct.pack(\n format_string,\n component_classification,\n component_identifier,\n component_comparison_stamp,\n ba2int(component_options),\n ba2int(requested_component_activation_method),\n component_location_offset,\n component_size,\n component_version_string_type,\n len(component_version_string),\n component_version_string.encode(\"ascii\"),\n )\n )\n\n index = 0\n pkg_header_checksum_size = 4\n start_offset = pldm_fw_up_pkg.tell() + pkg_header_checksum_size\n # Update ComponentLocationOffset and ComponentSize for all the components\n for offset in component_location_offsets:\n file_size = os.stat(image_files[index]).st_size\n pldm_fw_up_pkg.seek(offset)\n pldm_fw_up_pkg.write(struct.pack(\"<II\", start_offset, file_size))\n start_offset += file_size\n index += 1\n pldm_fw_up_pkg.seek(0, os.SEEK_END)", "def compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta", "def imageprepare(self,argv):\r\n\t\tim = Image.open(argv).convert('L')\r\n\t\twidth = float(im.size[0])\r\n\t\theight = float(im.size[1])\r\n\t\tnewImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\r\n\r\n\t\tif width > height: # check which dimension is bigger\r\n\t\t\t# Width is bigger. Width becomes 20 pixels.\r\n\t\t\tnheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\r\n\t\t\tif nheight == 0: # rare case but minimum is 1 pixel\r\n\t\t\t\tnheight = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twtop = int(round(((28 - nheight) / 2), 0)) # caculate horizontal pozition\r\n\t\t\tnewImage.paste(img, (4, wtop)) # paste resized image on white canvas\r\n\t\telse:\r\n\t\t\t# Height is bigger. Heigth becomes 20 pixels.\r\n\t\t\tnwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\r\n\t\t\tif (nwidth == 0): # rare case but minimum is 1 pixel\r\n\t\t\t\tnwidth = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\r\n\t\t\tnewImage.paste(img, (wleft, 4)) # paste resized image on white canvas\r\n\r\n\t\t# newImage.save(\"sample.png\")\r\n\r\n\t\ttv = list(newImage.getdata()) # get pixel values\r\n\r\n\t\t# normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\r\n\t\ttva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n\t\treturn tva", "def run_image_viewer( self ):\n\n # XXX: hardcoded program name and image size.\n subprocess.Popen( [\"feh\", \"-dZ\", \"-g\", \"800x600\", self.record[\"filename\"]] )", "def _prepare_image(self, image, initial_shape, gt_shape=None):\n image.landmarks['initial_shape'] = initial_shape\n image = image.rescale_to_reference_shape(\n self.reference_shape, group='initial_shape',\n interpolator=self.interpolator)\n\n if gt_shape:\n image.landmarks['gt_shape'] = initial_shape\n\n if self.n_levels > 1:\n if self.scaled_levels:\n pyramid = image.gaussian_pyramid(\n n_levels=self.n_levels, downscale=self.downscale)\n else:\n pyramid = image.smoothing_pyramid(\n n_levels=self.n_levels, downscale=self.downscale)\n images = [compute_features(i, self.feature_type)\n for i in pyramid]\n images.reverse()\n else:\n images = [compute_features(image, self.feature_type)]\n\n return images", "def process_image(image):\n\t#camera constants and knowns\n\timage_size = (1920, 1080)\n\theight = 1 #in meters please\n\tglobal img\n\timg = cv2.imread(str(image))\n\tcv2.namedWindow('image')\n\tcv2.setMouseCallback('image',draw_points)\n\n\twhile(1):\n\t cv2.imshow('image',img)\n\t k = cv2.waitKey(1) & 0xFF\n\t if k == 27:\n\t break\n\n\tcv2.destroyAllWindows()", "def process_image(image):\r\n image = random_brightness(image)\r\n image = crop_image(image)\r\n image = resize(image)\r\n return image", "def __init__(self):\n # Effective batch size\n self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT\n\n # Input image size\n if self.IMAGE_RESIZE_MODE == \"crop\":\n self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM,\n self.IMAGE_CHANNEL_COUNT])\n else:\n self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM,\n self.IMAGE_CHANNEL_COUNT])\n\n # Image meta data length\n # See compose_image_meta() for details\n self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES", "def update(self):\n if self.var_info.bits_per_pixel == 1:\n b = self._img.tobytes(\"raw\", \"1;R\")\n self.mmap[:len(b)] = b\n\n elif self.var_info.bits_per_pixel == 16:\n self.mmap[:] = self._img_to_rgb565_bytes()\n\n elif self.var_info.bits_per_pixel == 32:\n self.mmap[:] = self._img.convert(\"RGB\").tobytes(\"raw\", \"XRGB\")\n\n else:\n raise Exception(\"Not supported - platform %s with bits_per_pixel %s\" %\n (self.platform, self.var_info.bits_per_pixel))", "def execute(self, image: sitk.Image, params: MultiModalRegistrationParams = None) -> sitk.Image:\n\n if params is None:\n raise ValueError(\"params is not defined\")\n dimension = image.GetDimension()\n if dimension not in (2, 3):\n raise ValueError('Image dimension {} is not among the accepted (2, 3)'.format(dimension))\n\n # set a transform that is applied to the moving image to initialize the registration\n if self.registration_type == RegistrationType.BSPLINE:\n transform_domain_mesh_size = [10] * image.GetDimension()\n initial_transform = sitk.BSplineTransformInitializer(params.fixed_image, transform_domain_mesh_size)\n else:\n if self.registration_type == RegistrationType.RIGID:\n transform_type = sitk.VersorRigid3DTransform() if dimension == 3 else sitk.Euler2DTransform()\n elif self.registration_type == RegistrationType.AFFINE:\n transform_type = sitk.AffineTransform(dimension)\n elif self.registration_type == RegistrationType.SIMILARITY:\n transform_type = sitk.Similarity3DTransform() if dimension == 3 else sitk.Similarity2DTransform()\n else:\n raise 
ValueError('not supported registration_type')\n\n initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(params.fixed_image,\n image.GetPixelIDValue()),\n image,\n transform_type,\n sitk.CenteredTransformInitializerFilter.GEOMETRY)\n\n self.registration.SetInitialTransform(initial_transform, inPlace=True)\n\n if params.fixed_image_mask:\n self.registration.SetMetricFixedMask(params.fixed_image_mask)\n\n if params.callbacks is not None:\n for callback in params.callbacks:\n callback.set_params(self.registration, params.fixed_image, image, initial_transform)\n\n self.transform = self.registration.Execute(sitk.Cast(params.fixed_image, sitk.sitkFloat32),\n sitk.Cast(image, sitk.sitkFloat32))\n\n if self.verbose:\n print('MultiModalRegistration:\\n Final metric value: {0}'.format(self.registration.GetMetricValue()))\n print(' Optimizer\\'s stopping condition, {0}'.format(\n self.registration.GetOptimizerStopConditionDescription()))\n elif self.number_of_iterations == self.registration.GetOptimizerIteration():\n print('MultiModalRegistration: Optimizer terminated at number of iterations and did not converge!')\n\n return sitk.Resample(image, params.fixed_image, self.transform, self.resampling_interpolator, 0.0,\n image.GetPixelIDValue())", "def _image(self):\n print(\"imaging\")\n self.images.append(self.device_control.image())\n yield", "def execute(self, images_and_density_maps):\n raise NotImplementedError(\"execute method not implemented in the child class\")", "def create_image_db():\n logging.info('=============> create_image_db: create image metadata json mapper file <===========')\n load_all_map_dir(manifest_map_dir, layer_json_map_dir, layer_config_map_dir)\n print \"create pool\"\n P = multiprocessing.Pool(60)\n print \"before map!\"\n print len(manifest_names) #process_manifest\n print len(layer_json_map_dir)\n print \"before map!\"\n #json_datas = []\n #for i in manifest_names:\n # json_datas.append(process_manifest(i))\n json_datas = P.map(process_manifest, manifest_names)\n print \"after map\"\n print \"write to files!\"\n write_json_datas(json_datas)", "def run(self,image, label, featureClasses, settings, enabledImageTypes,csvFile):\n print('Processing started')\n import time\n startTime = time.time()\n # grayscaleImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(imageNode.GetName()))\n grayscaleImage = sitk.ReadImage(image)\n #sitkUtils.PushToSlicer(label, labelNode.GetName(), overwrite=True, compositeView=2)\n labelsDict = {}\n if label:\n print(\"label={}\".format(label))\n labelsDict = self.prepareLabelsFromLabelmap(label, grayscaleImage, labelsDict)\n # if segmentationNode:\n # labelsDict = self.prepareLabelsFromSegmentation(segmentationNode, grayscaleImage, labelsDict)\n\n #self.featureValues = extractor.execute(grayscaleImage, labelImage, images, **kwargs)\n featuresDict = {}\n for l in labelsDict.keys():\n print(\"Calculating features for \"+l)\n try:\n featuresDict[l] = self.calculateFeatures(grayscaleImage,\n labelsDict[l],\n featureClasses,\n settings,\n enabledImageTypes)\n except:\n print('calculateFeatures() failed')\n traceback.print_exc()\n self.saveFeatures2CSVFile(featuresDict,csvFile)\n print(\"Completed\")\n endtime = time.time()\n print(\"totalTime={}\".format(endtime-startTime))\n # return featuresDict", "def buildRunDict(self):\n self.showProgressBar()\n ori_images = 0\n if self.img_exist:\n ori_images = len(listDirectory(self.savePathJoin(\"Images\")))\n self.buildRunDictMain(ori_images)\n else:\n self.run_dict[\"Video\"] = {\n \"Run\": 
True,\n \"Progress\": ori_images,\n \"Text\": \"Preparing video\",\n }\n self.buildParamsDict()\n self.params_dict[\"send_video_frame\"] = True\n\n self.progressLabel.setText(\"Create images from video\")\n\n self.worker = calcRunner.CalculationRunner(self.params_dict) # no parent!\n self.thread = QThread() # no parent!\n\n self.worker.labelUpdate.connect(self.labelUpdate)\n\n self.worker.update.connect(self.progressUpdate)\n self.worker.videoFrame.connect(self.setVidFrame)\n\n self.worker.moveToThread(self.thread)\n self.thread.started.connect(self.worker.startThread)\n self.thread.start()", "def run(self, image):\n # width, height = image.size\n # resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n # target_size = (int(resize_ratio * width), int(resize_ratio * height))\n target_size = (self.INPUT_SIZE, self.INPUT_SIZE)\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n net_image = resized_image\n if params.HZ_preprocess_activate:\n net_image = params.image_preprocess_func(resized_image)\n net_image = np.expand_dims(net_image, axis=-1)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(net_image)]})\n seg_map = batch_seg_map[0]\n return resized_image, seg_map", "def _getInputImage (self, input, plist):\n\n # Extract the instrument name for the data that is being processed by Multidrizzle\n _instrument = plist['exposure'].header['INSTRUME']\n \n # Determine the instrument detector in use. NICMOS is a special case because it does\n # not use the 'DETECTOR' keyword. It instead used 'CAMERA' to identify which of it's\n # 3 camera's is in use. All other instruments support the 'DETECTOR' keyword.\n if (_instrument == 'NICMOS'):\n _detector = plist['exposure'].header['CAMERA']\n else:\n _detector = plist['exposure'].header['DETECTOR']\n \n # Extract the plate scale in use by the detector\n _platescale = plist['exposure'].pscale\n if _platescale == None:\n raise ValueError, 'The plate scale has a value of -- None -- '\n \n # Extract the dq array designation\n _dqname = plist['exposure'].dqname\n if _instrument != 'WFPC2':\n _dq_root,_dq_extn = fileutil.parseFilename(_dqname)\n _dqname = plist['orig_filename']+'['+_dq_extn+']'\n\n if _instrument == 'ACS':\n if _detector == 'HRC': return HRCInputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 'WFC': return WFCInputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 'SBC': return SBCInputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _instrument == 'WFPC2':\n if _detector == 1: return PCInputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 2: return WF2InputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 3: return WF3InputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 4: return WF4InputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _instrument == 'STIS': \n if _detector == 'CCD': return CCDInputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 'FUV-MAMA': return FUVInputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 'NUV-MAMA': return NUVInputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _instrument == 'NICMOS':\n if _detector == 1: return NIC1InputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 
2: return NIC2InputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 3: return NIC3InputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _instrument == 'WFC3':\n if _detector == 'UVIS': return WFC3UVISInputImage(input,_dqname,_platescale,memmap=0,proc_unit=self.proc_unit)\n if _detector == 'IR': return WFC3IRInputImage(input,_dqname,_platescale,memmap =0,proc_unit=self.proc_unit)\n\n # If a supported instrument is not detected, print the following error message\n # and raise an exception.\n msg = 'Instrument: ' + str(_instrument) + '/' + str(_detector) + ' not yet supported!'\n raise ValueError, msg", "def display_preprocessed(env,frame):\n env.reset()\n\n #Plot the figure\n plt.figure()\n\n #Show the pre processed frame\n plt.imshow(preprocess_frame(env.reset(), (0, 0, 0, 0), 84), cmap=\"gray\")\n\n #Add title\n plt.title('Pre Processed image')\n\n #Show the plot\n plt.show()", "def preprocess_main():", "def postprocess( # type: ignore[override]\n self,\n result: Mapping[str, Optional[torch.Tensor]],\n *,\n img_size: Tuple[int, int],\n output_height: int,\n output_width: int,\n **kwargs: Any,\n ) -> Dict[str, Optional[torch.Tensor]]:\n r: Optional[torch.Tensor] = result.get(self.output_key, None) # image\n if r is None:\n return {self.output_key: None}\n r = r[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)\n r = torch.nn.functional.interpolate(\n r, size=(output_height, output_width), mode=\"bilinear\", align_corners=False,\n )[0]\n return {self.output_key: r}", "def postprocess( # type: ignore[override]\n self,\n result: Mapping[str, Optional[torch.Tensor]],\n *,\n img_size: Tuple[int, int],\n output_height: int,\n output_width: int,\n **kwargs: Any,\n ) -> Dict[str, Optional[torch.Tensor]]:\n r: Optional[torch.Tensor] = result.get(self.output_key, None) # image\n if r is None:\n return {self.output_key: None}\n r = r[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)\n r = torch.nn.functional.interpolate(\n r, size=(output_height, output_width), mode=\"bilinear\", align_corners=False,\n )[0]\n return {self.output_key: r}" ]
[ "0.6546748", "0.6495667", "0.6287394", "0.57940394", "0.57754576", "0.57309806", "0.5728724", "0.5704282", "0.5697228", "0.565087", "0.56456286", "0.56390184", "0.5626983", "0.5626955", "0.555883", "0.5555184", "0.54956263", "0.54702634", "0.54599035", "0.54391104", "0.54306674", "0.54042053", "0.5387501", "0.5346556", "0.5329369", "0.53244376", "0.5309021", "0.53047824", "0.52686596", "0.5246656", "0.5235861", "0.5219412", "0.52136844", "0.52058876", "0.52025414", "0.5193374", "0.5171095", "0.5169648", "0.5121376", "0.50931776", "0.50685465", "0.50681674", "0.5060199", "0.50601715", "0.5047446", "0.5039441", "0.5037585", "0.5017846", "0.50090003", "0.50070286", "0.49905124", "0.49895287", "0.4983354", "0.4966223", "0.49575502", "0.4955728", "0.49550447", "0.4950964", "0.49506137", "0.49497756", "0.49461448", "0.49433532", "0.49322516", "0.4924345", "0.4923742", "0.4911328", "0.49102843", "0.4909655", "0.4909145", "0.49063662", "0.4902317", "0.48969406", "0.48964253", "0.4891469", "0.4887934", "0.48805186", "0.48797667", "0.4872754", "0.48725733", "0.48716784", "0.48692134", "0.48683757", "0.48682505", "0.4862462", "0.48571005", "0.48559088", "0.48550433", "0.4853957", "0.48539102", "0.4853345", "0.48512352", "0.48503023", "0.48442575", "0.48307323", "0.48275164", "0.48212242", "0.48205182", "0.48189205", "0.48156846", "0.48156846" ]
0.6391158
2
Initialize a two dimensional list as a matrix
def __init__(self, initArray):
    for row in initArray:
        for elem in row:
            if type(elem) is not int:
                raise TypeError
    n = len(initArray[0])
    if not all(len(x) == n for x in initArray):
        raise ArithmeticError
    self.array = initArray
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrixlist(inputlist, converter=proper, fake=False):\n if converter is None:\n converter = type(inputlist[0][0])\n xlen = len(inputlist[0])\n for x in xrange(1,len(inputlist)):\n if len(inputlist[x]) != xlen:\n raise IndexError(\"Unequal matrix row lengths for matrixlist of \"+str(xlen)+\" and \"+str(len(inputlist[x])))\n out = matrix(len(inputlist), xlen, converter=converter, fake=bool(fake))\n out.a = inputlist[:]\n out.convert()\n return out", "def matrix_init(sizex, sizey):\n return [[0]*sizey for i in range(sizex)]", "def fromList(cls, elems, **kwargs):\n if not ('m' in kwargs or 'n' in kwargs):\n raise ValueError(\"at least one of m and n must be specified\")\n m = kwargs.get('m')\n n = kwargs.get('n')\n num_elems = len(elems)\n if m is None:\n m = num_elems // n\n elif n is None:\n n = num_elems // m\n elif m * n != num_elems:\n raise ValueError(\"dimension does not match number of elements in\"\n \"list\")\n\n data = [elems[i * n: i * (n + 1)] for i in range(m)]\n return Matrix(m, n, data)", "def __init__(self,L):\n list.__init__(self,L)\n self.list = L\n i,j = LTMatrix.getRowColumn(len(L) - 1)\n assert i == j, \"Not a LTMatrix\"\n self.dimension = i + 1", "def init_from(cls, matrix: list) -> object:\n return cls(tool.flatten(matrix))", "def rowmatrixlist(inputlist=None, converter=proper, func=None, fake=False, clean=False):\n if inputlist is None:\n inputlist = []\n if func is None:\n func = rowmatrixlist\n outlist = []\n for item in inputlist:\n if islist(item):\n item = func(item)\n if not clean or not isnull(item):\n outlist.append(item)\n out = matrix(1, len(outlist), converter=converter, fake=fake)\n for x in xrange(0, len(outlist)):\n out.store(0,x, outlist[x])\n return out", "def make_matrix(rows, columns):\n\tmatrix = []\n\tfor row in range(rows):\n\t\tmatrix += [[0] * columns]\n\t\t\n\treturn matrix", "def create_matrix(list_of_edges, n):\n matrix = [[0 for i in range(n)] for j in range(n)]\n ind = 0\n for i in range(n):\n for j in range(i):\n matrix[i][j] = list_of_edges[ind]\n matrix[j][i] = list_of_edges[ind]\n ind += 1\n return matrix", "def make_two_dim_list(number_of_lists, number_of_points_in_list):\r\n z = [[''] * number_of_points_in_list for i in range(number_of_lists)]\r\n return z", "def create_matrix(n, m):\n matrix = [[None]*m for i in range(n)]\n return matrix", "def make_matrix(rows, cols, value=0):\n return Matrix([[value for i in range(cols)] for j in range(rows)])", "def make_matrix(sizex, sizey):\n return [[0]*sizey for i in xrange(sizex)]", "def make_matrix(num_rows, num_cols, entry_fn):\r\n return [[entry_fn(i,j) # given i, create a list\r\n for j in range(num_cols)] # [entry_fn(i,0),....]\r\n for i in range(num_rows)] # create one list for each i\r", "def make_matrix(sizex, sizey):\n return [[0] * sizey for i in range(sizex)]", "def __init__(self, record_list=[[]]) -> None:\n self.record_list = record_list\n self.all_item = []\n # all items in the record to build the n*n matrix\n for record in record_list:\n if record[0] not in self.all_item:\n self.all_item.append(record[0])\n if record[1] not in self.all_item:\n self.all_item.append(record[1])\n self.item_num = len(self.all_item)\n # number od unique items\n self.item_mat = np.zeros(shape=(self.item_num, self.item_num))\n # The matrix, considered as the Massey matrix in MasseyRanking, fo example.\n self.ranking = []\n # A copy of list to return.", "def from_list(cls, l):\n max_cols = max([len(r) for r in l])\n output = cls(len(l), max_cols)\n\n for i, r in enumerate(l):\n for j, c in 
enumerate(r):\n output[i, j] = c\n\n return output", "def make_matrix(num_rows: int,\n num_cols: int,\n entry_fn: Callable[[int, int], float]) -> Matrix:\n return [[entry_fn(i, j) # given i, create a list\n for j in range(num_cols)] # [entry_fn(i, 0), ... ]\n for i in range(num_rows)] # create one list for each i", "def __init__(self, *rows):\n self._width = 0\n self._height = 0\n self._value = list()\n\n if rows:\n # if the value passed into the constructor is a matrix\n if len(rows) == 1 and type(rows[0]) == type(self):\n for row in rows[0].value:\n newRow = list()\n for item in row: # this should make a deep copy.\n newRow.append(item)\n self.addRow(*newRow)\n # if the value passed into the constructor is a two-dimensional list\n elif len(rows) == 1 and type(rows[0]) in MATRIX_VALID_COLLECTIONS and \\\n type(rows[0][0]) in MATRIX_VALID_COLLECTIONS:\n for row in rows[0]:\n newRow = list()\n for item in row: # this should make a deep copy.\n newRow.append(item)\n self.addRow(*newRow)\n # if the value passed into the constructor is a list followed by a matrix width\n elif ((len(rows) == 2) and (type(rows[0]) in MATRIX_VALID_COLLECTIONS) and (\n type(rows[1]) in MATRIX_VALID_INTS)):\n if (len(rows[0]) % rows[1]):\n raise ValueError(\n 'Invalid list length for matrix construction, must be a multiple of width argument')\n newRow = list()\n for i in range(len(rows[0])):\n if (i and (not i % rows[1])): # i > 0 and a multiple of the given width.\n self.addRow(*newRow)\n newRow = list()\n newRow.append(rows[0][i])\n # when we get here, there should still be one row left in the \"buffer\", so\n self.addRow(*newRow)\n # if the value passed into the constructor is several lists\n else:\n for row in rows:\n if not (type(row) in MATRIX_VALID_COLLECTIONS):\n raise TypeError(\"Constructor arguments must be of type 'list' or 'tuple'\") # fix this!\n self.addRow(*row)", "def create2d(row_count, col_count, value=None):\n a = [None] * row_count\n for row in range(row_count):\n a[row] = [value] * col_count\n return a", "def Matrix(arg0: List[List[complex]]) -> ngsolve.bla.MatrixC:", "def __init__(self, rows, cols):\n if rows <= 0:\n raise ValueError('Number of matrix rows must be greater than zero.')\n if cols <= 0:\n raise ValueError('Number of matrix cols must be greater than zero.')\n\n self.__rows = rows\n self.__cols = cols\n\n # Create the matrix and initialize all elements to zero\n self.__m = []\n for i in range(1, self.__rows + 1):\n row = []\n for j in range(1, self.__cols + 1):\n row.append(0)\n self.__m.append(row)", "def list2matrix(image_list):\n\tflatten_list = []\n\tfor image in image_list:\n\t\tflatten_list.append(image.ravel())\n\n\tmatrix = np.vstack(flatten_list)\n\n\treturn matrix", "def set_up_matrix():\n matrix= []\n row= \"1 9 3 4 5\"\n row= to_int(row)\n matrix.append(row)\n row= \"2 30 4 5 6\"\n row= to_int(row)\n matrix.append(row)\n row= \"3 8 5 6 7\"\n row= to_int(row)\n matrix.append(row)\n row= \"4 5 6 7 8\"\n row= to_int(row)\n matrix.append(row)\n row= \"5 6 7 8 9\"\n row= to_int(row)\n matrix.append(row)\n return matrix", "def create_matrix(x, y, fill=''):\n return [[fill for p in range(x)] for p in range(y)]", "def create_matrix(line):\n\tlst = line.split()\n\tn = int(len(lst)**.5)\n\treturn [lst[i*n:i*n+n] for i in range(n)]", "def __init__(self, n):\n\t\tself._matr = []\n\t\tfor i in range(n):\n\t\t\tself._matr.append([])\n\t\t\tfor j in range(n):\n\t\t\t\tself._matr[i].append(False)", "def make_matrix(num_rows, num_cols, entry_fn):\n return [[entry_fn(i, j) for j in 
list(range(num_cols))]\n for i in list(range(num_rows))]", "def to_matrix(expr):\r\n # if expr is a list of lists, and is rectangular, then return Matrix(expr)\r\n if not type(expr) == list:\r\n return expr\r\n for row in expr:\r\n if (not type(row) == list):\r\n return expr\r\n rdim = len(expr[0])\r\n for row in expr:\r\n if not len(row) == rdim:\r\n return expr\r\n return sympy.Matrix(expr)", "def init_zero_matrix(self,rows,cols):\n\t\ttmpMatrix = []\n\t\tfor i in range(rows):\n\t\t\ttmp = [0 for j in range(cols)]\n\t\t\ttmpMatrix.append(tmp)\n\t\treturn tmpMatrix", "def __init__(self, n):\r\n self.size = n\r\n self.mat = []\r\n for i in range(n):\r\n self.mat.append([0] * n)", "def make_matrix(num_rows, num_cols, entry_fn):\n return [[entry_fn(i, j)\n for j in range(num_cols)]\n for i in range(num_rows)]", "def Matrix(row, col, val=0):\n\treturn [[val for _j in xrange(col)] for _i in xrange(row)]", "def crear_matrix(nxn):\n matrix =[]\n for i in range(nxn):\n matrix.append([])\n for e in range(nxn):\n matrix[i].append(\"\")\n return matrix", "def __init__(self, n):\r\n self.matr = []\r\n self.n = n\r\n for i in range(n):\r\n self.matr.append([])\r\n for j in range(n):\r\n self.matr[i].append(False)", "def as_matrix(self) -> types.Matrix:", "def make_matrix(num_rows: int, num_cols: int, entry_fn: Callable[[int, int], float]) -> Matrix:\n return [[entry_fn(i, j) for j in range(num_cols)] for i in range(num_rows)]", "def displayAsMatrix(lists):\r\n for lst in lists:\r\n print(lst)", "def matrixElementMultiply (self, value, list):\n\t\tnumRows = len(list)\n\t\tnumCols = len(list[0])\n\t\n\t\treturn [[value * list[j][i] for i in range(numCols)] for j in range(numRows)]", "def diagmatrixlist(inputlist=None, converter=proper, func=None, fake=True, clean=False):\n if inputlist is None:\n inputlist = []\n if func is None:\n func = diagmatrixlist\n outlist = []\n for item in inputlist:\n if islist(item):\n item = func(item)\n if not clean or not isnull(item):\n outlist.append(item)\n out = matrix(len(outlist), converter=converter, fake=fake)\n for x in xrange(0, len(outlist)):\n out.store(x,x, outlist[x])\n return out", "def make_matrix(num_rows: int,\n num_cols: int,\n entry_fn: Callable[[int, int], float]) -> Matrix:\n return [[entry_fn(i, j) for j in range(num_cols)]\n for i in range(num_rows)]", "def make_matrix(num_rows: int, num_cols: int, entry_fn: Callable[[int, int], float]) -> Matrix:\n return [[entry_fn(i, j)\n for j in range(num_cols)]\n for i in range(num_rows)]", "def zeros_matrix(self, rows, cols):\r\n M = []\r\n while len(M) < rows:\r\n M.append([])\r\n while len(M[-1]) < cols:\r\n M[-1].append(0.0)\r\n\r\n return M", "def domatrixlist(inputlist, converter=proper):\n try:\n out = matrixlist(inputlist, converter)\n except:\n out = diagmatrixlist(inputlist, converter, domatrixlist)\n return out", "def initializeMatrix(self, seqs):\n currentSequence = seqs[0]\n if len(seqs) == 1:\n # Base case in the recursion, only 1 sequence left\n return [None] * (len(currentSequence) + 1)\n\n else:\n return [self.initializeMatrix(seqs[1:]) for x in range(len(currentSequence) + 1)]", "def board_init():\n board = [[[i for i in range(1,n+1)] for j in range(n)] for k in range(n)]\n return board", "def __init__(self):\n self.cont = [[[] for _ in range(101)] for _ in range(101)]", "def init_two_d_array(dimens, val):\n w, x = dimens\n return [[val for j in range(x)] for i in range(w)]", "def empty_matrix(self):\r\n\r\n return [[0 for i in range(len(self.s2)+1)] for j in range(len(self.s1)+1)]", "def 
zeros_matrix(rows, cols):\n M = []\n while len(M) < rows:\n M.append([])\n while len(M[-1]) < cols:\n M[-1].append(0.0)\n \n return M", "def _zeros_like_nd_list(l, dtype):\n total_size = np.sum([x.size for x in l])\n return np.zeros(total_size, dtype)", "def __init__(self,nrows,ncols,fill=None):\n self.nrows = nrows \n self.ncols = ncols\n self.swapcount = 1\n self.initargs = str((self.nrows,self.ncols,fill))\n\n if type(nrows) != int or type(ncols) != int:\n raise TypeError('matrix number of rows and columns must be ints')\n if nrows <= 0 or ncols <= 0:\n raise ValueError('matrix number of rows and columns must be positive')\n \n self.matrix = [[0 for i in range(self.ncols)] for j in range(self.nrows)]\n if fill != None:\n self.fill(fill)", "def CreateMatrix(self) -> BaseMatrix:", "def CreateMatrix(self) -> BaseMatrix:", "def private_create_matrix(sample_size, dim, n_param):\n if dim == 0:\n point = []\n for i in range(n_param):\n point.append(0)\n return [point, 9]\n return [private_create_matrix(sample_size, dim - 1, n_param) for _ in range(sample_size)]", "def is_matrix(self, a_list):\n if type(a_list) != list:\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for index in range(len(a_list)):\n if type(a_list[index]) != list or \\\n len(a_list[index]) != len(a_list[(index - 1)]):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for value in a_list[index]:\n if not isinstance(value, (int, float)):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n return a_list", "def create_matrix(self):\n\n self.matrix = np.zeros((len(self.users), len(self.items)))\n\n for user in self.train_set['users']:\n for item in self.train_set['feedback'][user]:\n self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \\\n self.train_set['feedback'][user][item]", "def set_featurematrix_length(final_list):\n featurematrix = []\n\n if len(final_list) == 3:\n featurematrix = [[a, b, c] for a, b, c in zip(*final_list)]\n elif len(final_list) == 4:\n featurematrix = [[a, b, c, d] for a, b, c, d in zip(*final_list)]\n elif len(final_list) == 9:\n featurematrix = [[a, b, c, d, e, f, g, h, i] for a, b, c, d, e, f, g, h, i in zip(*final_list)]\n elif len(final_list) == 10:\n featurematrix = [[a, b, c, d, e, f, g, h, i, j] for a, b, c, d, e, f, g, h, i, j in zip(*final_list)]\n elif len(final_list) == 12:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l] for\n a, b, c, d, e, f, g, h, i, j, k, l\n in zip(*final_list)]\n elif len(final_list) == 13:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m] for\n a, b, c, d, e, f, g, h, i, j, k, l, m\n in zip(*final_list)]\n elif len(final_list) == 14:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n\n in zip(*final_list)]\n elif len(final_list) == 16:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p\n in zip(*final_list)]\n elif len(final_list) == 17:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q\n in zip(*final_list)]\n elif len(final_list) == 21:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u\n in zip(*final_list)]\n elif len(final_list) == 23:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w] for\n 
a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w\n in zip(*final_list)]\n elif len(final_list) == 25:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y\n in zip(*final_list)]\n elif len(final_list) == 26:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z\n in zip(*final_list)]\n elif len(final_list) == 28:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb\n in zip(*final_list)]\n elif len(final_list) == 29:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc\n in zip(*final_list)]\n elif len(final_list) == 34:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh\n in zip(*final_list)]\n elif len(final_list) == 35:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh, ii] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii\n in zip(*final_list)]\n elif len(final_list) == 37:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh, ii, jj, kk] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk\n in zip(*final_list)]\n elif len(final_list) == 38:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh, ii, jj, kk, ll] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll\n in zip(*final_list)]\n elif len(final_list) == 39:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh, ii, jj, kk, ll, mm] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll, mm\n in zip(*final_list)]\n elif len(final_list) == 41:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh, ii, jj, kk, ll, mm, nn, oo] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll, mm, nn, oo\n in zip(*final_list)]\n elif len(final_list) == 42:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh, ii, jj, kk, ll, mm, nn, oo, pp] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll, mm, nn, oo, pp\n in zip(*final_list)]\n elif len(final_list) == 47:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, 
bb, cc, dd, ee, ff, gg,\n hh, ii, jj, kk, ll, mm, nn, oo, pp, qq, rr, ss, tt, uu] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll, mm, nn, oo, pp, qq, rr, ss, tt, uu\n in zip(*final_list)]\n elif len(final_list) == 48:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh, ii, jj, kk, ll, mm, nn, oo, pp, qq, rr, ss, tt, uu, vv] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll, mm, nn, oo, pp, qq, rr, ss, tt, uu, vv\n in zip(*final_list)]\n elif len(final_list) == 50:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh, ii, jj, kk, ll, mm, nn, oo, pp, qq, rr, ss, tt, uu, vv, ww, xx] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll, mm, nn, oo, pp, qq, rr, ss, tt, uu, vv, ww, xx\n in zip(*final_list)]\n elif len(final_list) == 51:\n featurematrix = [\n [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg,\n hh, ii, jj, kk, ll, mm, nn, oo, pp, qq, rr, ss, tt, uu, vv, ww, xx, yy] for\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, bb, cc, dd, ee, ff, gg, hh, ii, jj, kk, ll, mm, nn, oo, pp, qq, rr, ss, tt, uu, vv, ww, xx, yy\n in zip(*final_list)]\n\n return featurematrix", "def diagM(l):\r\n dim = len(l)\r\n M = np.zeros((dim, dim))\r\n np.fill_diagonal(M, l)\r\n return matrix(M)", "def init_Dist_Matrix(length):\r\n dist_matrix = []\r\n \r\n while len(dist_matrix) < length:\r\n dist_matrix.append([])\r\n while len(dist_matrix[-1]) < length:\r\n dist_matrix[-1].append(float(0))\r\n \r\n # print_matrix(dist_matrix) #just for the visuals can be removed later\r\n return(dist_matrix)", "def from_list(cls, rows, domain):\n nrows = len(rows)\n ncols = 0 if not nrows else len(rows[0])\n conv = lambda e: domain(*e) if isinstance(e, tuple) else domain(e)\n domain_rows = [[conv(e) for e in row] for row in rows]\n return DomainMatrix(domain_rows, (nrows, ncols), domain)", "def getVector(lstOfValues):\n return MatrixExtended([[v] for v in lstOfValues])", "def __init__(self, list_of_matrices):\n self._nz_tuples = None\n self._maps = None\n self._build_maps(list_of_matrices)", "def generate_matrix(num_rows, num_cols):\n size = num_rows * num_cols\n matrix = []\n for r in xrange(num_rows):\n start = r * num_cols\n stop = (r + 1) * num_cols\n ls = range(start, stop)\n matrix.append(ls)\n Matrix = namedtuple(\"Matrix\", [\"size\", \"rows\", \"cols\", \"matrix\"])\n return Matrix(size=size, rows=num_rows, cols=num_cols, matrix=matrix)", "def zeros_matrix(rows, cols):\n M = []\n while len(M) < rows:\n M.append([])\n while len(M[-1]) < cols:\n M[-1].append(0.0)\n\n return M", "def zeros_matrix(rows, cols):\n M = []\n while len(M) < rows:\n M.append([])\n while len(M[-1]) < cols:\n M[-1].append(0.0)\n\n return M", "def vector_as_matrix(v):\r\n return [[v_i] for v_i in v]", "def setUp(self):\r\n self.matrix = array(\r\n [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])\r\n self.cells = [(0, 1), (1, 3)]\r\n self.cells2 = [(0, 2), (2, 3)]", "def make_matrix():\n row, col = [int(x) for x in input().split()]\n island = [[int(x) for x in input().split()] for _ in range(row)]\n return row, col, island", "def 
cell2mat2(l, max_len=None) -> nptyp.NDArray[float]:\n if max_len is None:\n max_len = np.amax([len(l1) for l1 in l])\n \n n = len(l)\n m = np.zeros([n, max_len]) + np.nan\n \n for ii in range(n):\n l1 = l[ii]\n if len(l1) > max_len:\n m[ii,:] = l1[:max_len]\n elif len(l1) < max_len:\n m[ii,:len(l1)] = l1\n else:\n m[ii,:] = l1\n\n return m", "def __new__(cls, rowslist, shape, domain):\n flint_mat = cls._get_flint_func(domain)\n\n if 0 not in shape:\n try:\n rep = flint_mat(rowslist)\n except (ValueError, TypeError):\n raise DMBadInputError(f\"Input should be a list of list of {domain}\")\n else:\n rep = flint_mat(*shape)\n\n return cls._new(rep, shape, domain)", "def create_fabric_matrix(rows, columns):\n return [['.'] * columns for i in range(rows)]", "def gen_matrix(e):\n\tif e < 1:\n\t\treturn None\n\tm_list = [[[1, 2], [3, 0]]]\n\t_b = m_list[0]\n\tfor n in xrange(1, e):\n\t\tm = m_list[n - 1]\n\t\tm_list.append(\n\t\t\t[\n\t\t\t\t[4 * i + _b[0][0] for i in m[0]] + [4 * i + _b[0][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[0][0] for i in m[1]] + [4 * i + _b[0][1] for i in m[1]],\n\t\t\t\t[4 * i + _b[1][0] for i in m[0]] + [4 * i + _b[1][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[1][0] for i in m[1]] + [4 * i + _b[1][1] for i in m[1]],\n\t\t\t]\n\t\t)\n\treturn m_list", "def listoflistToarray(l):\n max_dim=max([len(c) for c in l])\n all_array=[np.pad(c,(0,max_dim-len(c)),\"constant\",constant_values=(0,0)) for c in l]\n return np.array(all_array)", "def create_board_matrix(self, height, width):\n return [[' '] * width for _ in xrange(height)]", "def setup(self, length):\n self.matrix = [None] * length\n for x in range(0,length):\n self.matrix[x] = [None] * length\n self.i = self.k = self.j = 0", "def build_matrix(prompt_size, prompt_matrix):\n\n rows, cols = [int(i) for i in get_num_row(\n 2,\n lambda x: int(x),\n prompt=prompt_size\n )]\n matrix = []\n\n print(prompt_matrix)\n for i in range(rows):\n row = get_num_row(cols, lambda x: float(x))\n matrix.append(row)\n\n return matrix", "def initialize(self, height, width,):\n grid = list()\n for x in xrange(height):\n grid.append(list())\n for y in xrange(width):\n grid[x].append(Node(x, y))\n return grid", "def list2matrix_homog(xy_shape): \n vertex_array = [] \n for i in range(0, len(xy_shape), 2): \n vertex_x = xy_shape[ i] \n vertex_y = xy_shape[i + 1] \n extra_element = 1.0 \n vertex = [vertex_x, vertex_y, extra_element] \n vertex_array.append(vertex) \n vertex_matrix = numpy.matrix(vertex_array) \n return vertex_matrix", "def create_grid(grid):\r\n for i in range (4):\r\n grid.append ([])\r\n for j in range (4):\r\n grid[i].append (0)", "def __init__(self, board=None):\n self.items = []\n for i in range(3):\n rowlst = []\n for j in range(3):\n if board is None:\n rowlst.append(Dummy())\n else:\n rowlst.append(board[i][j])\n self.items.append(rowlst)", "def init_fm_mps(L):\n d = 2\n B = []\n s = []\n for i in range(L):\n B.append(np.zeros([2,1,1])); B[-1][0,0,0]=1\n s.append(np.ones([1]))\n s.append(np.ones([1]))\n return B,s", "def ones(cls, size:(int,int)) -> 'Matrix': #note single quotes because this is the class, itself and has not been completely defined yet.\n N = size[0]\n M = size[1]\n assert N > 0 and M > 0, \"N and M must be positive.\"\n return cls([[1 for col in range(M)] for row in range(N)])", "def make_table(m, n):\n return [[0] * n for _ in range(m)]", "def question_two():\n # [[][][]]\n x = [[]]*3\n #[[a],[a],[a]]\n x[0].append('a')\n #[[a, b],[a, b],[a, b]]\n x[1].append('b')\n #[[a, b, c],[a, b, c],[a, b, c]]\n 
x[2].append('c')\n #[[d],[a, b, c],[a, b, c]]\n x[0] = ['d']", "def from_matrix(cls, matrix: list) -> object:\n return cls(tool.matrix_to_array(matrix))", "def __init__(self, num_rows):\r\n self.num_rows = num_rows\r\n\r\n # Make the linear array where we store items.\r\n num_items = self.num_cells_for_rows(self.num_rows)\r\n self.values = [None for i in range(num_items)]", "def __init__(self, matrix_string: str) -> None:\n self.matrix = [[int(item) for item in line.split()] for line in matrix_string.splitlines()]", "def list_to_sparse(inputs):\n\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(\n *[itertools.repeat(i, len(x)) for i, x in enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n\n s = coo_matrix((data, (row, col)), shape=(\n len(inputs), np.max([len(x) for x in inputs])))\n\n return s", "def _matrix_(self, R):\n from sage.matrix.all import matrix\n matlab = self.parent()\n entries = matlab.strip_answer(matlab.eval(\"mat2str({0})\".format(self.name())))\n entries = entries.strip()[1:-1].replace(';', ' ')\n entries = [R(_) for _ in entries.split(' ')]\n nrows, ncols = map(int, str(self.size()).strip().split())\n m = matrix(R, nrows, ncols, entries)\n return m", "def _makeWaMatrix_(self, wa, nRow, nCol):\n\t\t#print nRow, nCol\n\t\t#print wa\n\t\t#print\n\t\twaMatrix = [[0 for j in xrange(nCol)] for i in xrange(nRow)]\n\t\tfor a in wa: \n\t\t\tfor i in a[0]:\n\t\t\t\tfor j in a[1]:\n\t\t\t\t\twaMatrix[i][j] = 1\n\t\treturn waMatrix", "def fromRows(data):\n m = len(data)\n n = len(data[0])\n # check that data structure is valid\n if any([len(row) != n for row in data[1:]]):\n raise ValueError(\"inconsistent row lengths\")\n # check that data types are inconsistent\n t = type(data[0][0])\n if any(any(type(e) is not t for e in row[(i == 0):])\n for i, row in enumerate(data)):\n raise TypeError(\"inconsistent element types\")\n # dispatch to childern based on type\n if t is bool:\n return BooleanMatrix(m, n, data)\n elif t is int:\n return IntegerMatrix(m, n, data)\n if t is float:\n return RealMatrix(m, n, data)", "def np_row_vec (init_list):\n return np.array (init_list, order='F', ndmin=2)", "def __init__(self, rows, columns, fillValue = None):\n self.data = []\n for row in range(rows):\n dataInRow = []\n for column in range(columns):\n dataInRow.append(fillValue)\n self.data.append(dataInRow)", "def fill(self,value):\n if value is None:\n return\n if isinstance(value,numbers.Number):\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.matrix[i][j] = value\n elif isinstance(value,list):\n if len(value) != self.nrows*self.ncols:\n raise ValueError('matrix fill value has incorrect number of elements')\n\n if not all(isinstance(item,numbers.Number) for item in value):\n raise TypeError('matrix fill value not a list of numbers')\n index = 0\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.matrix[i][j] = value[index]\n index += 1 \n else:\n raise TypeError('matrix fill value not a number')", "def create_board(rows, columns):\n res = [[0 for i in range(columns)] for j in range(rows)]\n return res", "def clone_matrix(mat):\n return [[x for x in row] for row in mat]", "def matrices(self):\n return [ self.__class__(labels=self.labels,\n labels_map=self.labels_map,\n sets=[x]) for x in self.sets]", "def __init__(self, rows, cols, default_val=0):\n self.num_rows = rows\n self.num_cols = cols\n\n # Initialize the 2-dimensional array\n self.rows = [[default_val] * cols for _ in xrange(rows)]", "def 
init_output_mat(self, y_list):", "def matrixitems(inputitems, y, x=None, converter=proper, fake=False):\n if x is None:\n x = len(inputitems)/y\n out = matrix(y, x, converter=converter, fake=fake)\n z = 0\n for y,x in out.coords(False):\n out.store(y,x, inputitems[z])\n z += 1\n return out", "def __init__(self, m=0, n=0, values=[]):\n self.m = m\n self.n = n\n self.values = [[0 for _ in xrange(m)] for _ in xrange(n)]\n value = 0\n for i in xrange(self.n):\n for j in xrange(self.m):\n if value >= len(values):\n break\n (self.values[i])[j] = values[value]\n value += 1" ]
[ "0.732879", "0.705147", "0.69763094", "0.6929091", "0.69166654", "0.68848765", "0.68401814", "0.6766674", "0.66469675", "0.6645936", "0.66292936", "0.66119903", "0.6598583", "0.65325534", "0.6505658", "0.6502061", "0.6496865", "0.6486795", "0.6459021", "0.64460206", "0.6435002", "0.6429597", "0.6403512", "0.6387601", "0.6344631", "0.6322565", "0.63191", "0.631901", "0.63167685", "0.6307809", "0.6303603", "0.62989074", "0.6281622", "0.6223811", "0.6215729", "0.62086415", "0.6198446", "0.6195766", "0.6180441", "0.6178887", "0.61743647", "0.6152434", "0.6138921", "0.6114839", "0.6111596", "0.6104974", "0.61027825", "0.60899353", "0.6083277", "0.6064698", "0.60608166", "0.6060761", "0.6060761", "0.60571384", "0.60439014", "0.6033327", "0.60275793", "0.60223675", "0.6013839", "0.60133564", "0.6003625", "0.5996253", "0.5987874", "0.5964008", "0.5964008", "0.59535664", "0.59521425", "0.594696", "0.5939783", "0.5925974", "0.5920959", "0.59117377", "0.5911088", "0.5895856", "0.5870089", "0.5854977", "0.58379775", "0.58356863", "0.58288956", "0.5825411", "0.58157843", "0.58104086", "0.580989", "0.580549", "0.5794018", "0.5765517", "0.57634515", "0.5759985", "0.5754899", "0.5749789", "0.57466555", "0.5741957", "0.57385355", "0.5735712", "0.5731949", "0.572887", "0.5728818", "0.5724974", "0.5724839", "0.5715316", "0.57134724" ]
0.0
-1
Return a string for the array of numbers for this matrix
def __str__(self):
    return str(self.array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self) -> str:\n\t\treturn \",\".join(\"\".join(str(n) for n in m) for m in self.matrix)", "def __str__(self):\n output = \"\"\n for i in self.values:\n st = []\n output += \"[\"\n for j in i:\n st.append(str(j))\n output += \",\".join(st)+\"]\"\n return str(self.m)+\"x\"+str(self.n)+\" [\" + output + \"]\"", "def __str__(self):\n return str(self.asMatrix())", "def __str__(self) -> str:\n\t\treturn f\"dim {self.dimM},{self.dimN}\" +\"\\n\" \\\n\t\t\t+ \"\\n\".join(\"\".join(str(n) for n in m) for m in self.matrix)", "def __str__(self):\n ans = \"\"\n for i in range(self.row):\n for j in range(self.col):\n ans+=str(self.array[i][j])+\" \"\n ans+=\"\\n\"\n return ans", "def _matrixToStr(self, name, mat):\n r = []\n r.append(\"\\n\" + name)\n for i in range(len(self.answer['a priori state vector'])):\n r.append(\", \".join([\"{0:=+10.4g}\".format(float(v)) \n for v in mat[:, i]]))\n return \"\\n\".join(r)", "def matrix2str(A):\n s = \"\"\n for x in numpy.nditer(A, order='F'):\n s = s + str(x) + \",\"\n\n return s", "def matrix_to_str(matrix):\n output_str = \"[\"\n for i in matrix:\n output_str += \"[\"\n for j in i:\n output_str += str(j) + \", \"\n output_str = output_str[:-2] + \"], \"\n output_str = output_str[:-2] + \"]\"\n return output_str", "def convert_to_string_array(matrix):\n res = []\n for row in matrix:\n res.append(''.join(row))\n return '\\n'.join(res)", "def __repr__(self):\n return repr(self.matrix)", "def __str__(self):\r\n # The full representative string\r\n str_matrix = \"\"\r\n\r\n if self.matrix is not None:\r\n # Save the lenght into a variable\r\n # to send this number to the tiles method\r\n # and calculate the number of spaces\r\n spaces = len(self.matrix)\r\n for i in range(0, spaces):\r\n nums = list(filter(lambda x: x != \"_\", self.matrix[i]))\r\n str_matrix += self.tiles(nums, (i+1), (spaces - i))\r\n\r\n return str_matrix", "def matrixToString(matrix):\n nRows = len(matrix)\n if nRows == 0:\n return '[0,0](())'\n nCols = len(matrix[0])\n string = '[%d,%d](' % (nRows, nCols)\n for r in range(nRows):\n string += '('\n for c in range(nCols):\n string += str(float(matrix[r][c]))\n if c != nCols - 1:\n string += ','\n string += ')'\n if r != nRows - 1:\n string += ','\n string += ')'\n return string", "def values_to_grid(self):\n str = []\n for r in self.rows:\n for c in self.cols:\n v = self.values[r + c]\n str.append(v if len(v) == 1 else '0')\n return ''.join(str)", "def __repr__(self):\n return self.matrix and '\\n'.join([\"|%s|\" % s for s in [' '.join([\"%-6.3f\" % e for e in w]) for w in self.matrix]]) or \"<pusta macierz>\"", "def m_numeric_array(self, value):\n return '<numeric_array id=\"%s\" encoding=\"base64\">%s</numeric_array>' % \\\n (self.register(value), Numeric.dumps(value).encode('base64'))", "def format_matrix(x):\n return ' '.join([format_vector(y) for y in x])", "def __str__(self):\n my_str=\"[\"\n for elem in range(self.size):\n x=cArray.cModule.get_element(self.arrayRef,ctypes.c_int(elem))\n my_str+=str(x)+\" \"\n my_str+=\"]\"\n return my_str", "def __str__(self):\n return str(self.arr)", "def matrix_to_text(matrix):\n return ''.join(['{:02x}{:02x}{:02x}{:02x}'.format(\n matrix[0][c], matrix[1][c], matrix[2][c], matrix[3][c]) for c in range(4)])", "def np2str(a: np.ndarray) -> str:\n return json.dumps(a.tolist())", "def __str__(self):\n result = \"\"\n for row in range(self.size):\n current = []\n for column in range(self.size):\n subscript = (row, column)\n current.append(self.get(subscript))\n if result == \"\":\n result = 
\"[{}\".format(current)\n else:\n result = \"{0}\\n {1}\".format(result, current)\n return \"{}]\".format(result)", "def get_matrix_string(matrix, variable_name=None, decimals=4):\n\n if decimals:\n matrix = np.round(matrix, decimals=decimals) + 0\n else:\n matrix = np.array(matrix)\n\n header = \"$$\\n\"\n if variable_name:\n header += f\"{variable_name} = \\\\begin{{bmatrix}}\\n\"\n else:\n header += \"\\\\begin{bmatrix}\\n\"\n\n footer = \"\\\\end{bmatrix}\\n$$\"\n\n matrix_string = \"\"\n\n assert hasattr(matrix, \"__iter__\"), \"The matrix provided was not iterable\"\n\n for row in matrix:\n row_string = \"\"\n for idx, value in enumerate(row):\n row_string += f\"{value:.4g}\"\n if idx != len(row) - 1:\n row_string += \" & \"\n row_string += \" \\\\\\\\ \\n\"\n matrix_string += row_string\n\n return header + matrix_string + footer", "def __str__(self):\n outstr = \"\"\n for i in range(3):\n for j in range(3):\n outstr += str(self.pos_to_num[(i, j)]) + \" \"\n outstr = outstr[:-1]\n outstr += \"\\n\"\n outstr += \"\\n\"\n return outstr", "def __str__(self):\n lst = [str(i) for i in self.data]\n if self.column:\n return '[' + ', '.join(lst) + ']\\''\n else:\n return '[' + ', '.join(lst) + ']'", "def linecodes_read_rmatrix(self) -> str:\n return Bridge.var_array_function(self.dss_obj.LineCodesV, 0, None, '')", "def dataAsString(self):\n\n # Force generation of .array\n d = self.asArray()\n slist = []\n for l in self.array:\n s = \"%s %s\" % (self.name, self.rowAsString(l))\n slist.append(s)\n return '\\n'.join(slist)", "def __str__(self) -> str:\n return '\\n'.join([' '.join([str(u) for u in row]) for row in self.adjacency_matrix])", "def matrix_to_string(main_list):\n output = \"\"\n for sub_list in main_list:\n for element in sub_list:\n output += element\n output += \"\\n\"\n return output.rstrip(\"\\n\")", "def sage2matlab_matrix_string(self, A):\n return str(A.rows()).replace('), (', '; ').replace('(', '').replace(')','')", "def linecodes_read_xmatrix(self) -> str:\n return Bridge.var_array_function(self.dss_obj.LineCodesV, 2, None, '')", "def to_string(self):\r\n result = \"\"\r\n for row in range(self.num_rows):\r\n for col in range(row + 1):\r\n result += f\"{self[(row, col)]} \"\r\n result += \"\\n\"\r\n return result", "def _numpy_text(tensor):\n if dtype_util.is_numpy_compatible(tensor.dtype):\n value = np.array(tensor)\n if value.shape:\n text = repr(value)\n else:\n text = str(value)\n else:\n text = '<unprintable>'\n if '\\n' in text:\n text = '\\n' + text\n return text", "def matrix_string_zeros(self, size, prefix = ''):\n if size == 1: # Not indexed\n return np.array2string(np.array(0.))\n if len(size) == 1:\n size.append(1)\n mat = np.zeros(size)\n return np.array2string(mat, prefix = prefix, sign = ' ', separator = ',')", "def __str__(self):\n return np.array2string(self.graph.toarray())", "def toString(arr2d):\n return (\"\\n\".join(\"\\t\".join(row) for row in arr2d))", "def pretty_print(self):\r\n out = \"\"\r\n\r\n rows,cols = self.matrix.shape\r\n\r\n for row in xrange(0,rows):\r\n out += \"[\"\r\n\r\n for col in xrange(0,cols):\r\n out += \"%+0.2f \"%self.matrix[row][col]\r\n out += \"]\\n\"\r\n\r\n return out", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def __str__(self):\n from nodepy.utils import array2strings\n\n c = 
array2strings(self.c,printzeros=True)\n A = array2strings(self.A)\n b = array2strings(self.b,printzeros=True)\n lenmax, colmax = _get_column_widths([A,b,c])\n\n s=self.name+'\\n'+self.info+'\\n'\n for i in range(len(self)):\n s+=c[i].ljust(colmax+1)+'|'\n for j in range(len(self)):\n s+=A[i,j].ljust(colmax+1)\n s=s.rstrip()+'\\n'\n s+='_'*(colmax+1)+'|'+('_'*(colmax+1)*len(self))+'\\n'\n s+= ' '*(colmax+1)+'|'\n for j in range(len(self)):\n s+=b[j].ljust(colmax+1)\n return s.rstrip()", "def __str__(self):\n s =\"\"\n if self.data is 0:\n return \"[]\"\n else:\n for i in range(len(self.data)):\n s += str(self.data[i])\n if i != len(self.data)-1:\n s += \", \"\n return \"[\" + s + \"]\"", "def __str__(self):\n rows = ['[' + ', '.join([str(i) for i in row]) + ']' for row in self.data]\n return '\\n'.join(rows)", "def _repr_latex_(self):\n if len(self.shape) > 2:\n raise ValueError(\"bmatrix can at most display two dimensions\")\n\n def fmt(x):\n if x == 0:\n return \".\"\n if np.abs(x) < EPS:\n return \"0.\"\n return \"{:.2g}\".format(x)\n\n temp_string = np.array2string(\n self,\n formatter={\"float_kind\": fmt},\n edgeitems=3,\n )\n lines = temp_string.replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\n rv = [r\"\\begin{bmatrix}\"]\n rv += [\" \" + \" & \".join(l.split()) + r\"\\\\\" for l in lines]\n rv += [r\"\\end{bmatrix}\"]\n return \"\\n\".join(rv)", "def __repr__(self):\n result = \"\"\n row_prefix = \"\"\n for i in xrange(self.num_rows):\n result += row_prefix\n col_prefix = \"\"\n for j in xrange(self.num_cols):\n result += col_prefix + str(self[i, j])\n col_prefix = \" \"\n row_prefix = \"\\n\"\n\n return result", "def float_array_string(arr: Iterable[float]) -> str:\n return \"[\" + \", \".join([\"{:.4f}\".format(el) for el in arr]) + \"]\"", "def __str__(self):\n if self._houses_num == 0:\n return \"[0]\"\n \n out_str = \"[\"\n \n for indx in range(self._houses_num):\n if indx == 0:\n out_str = \"[%d,\" %(self._houses[indx])\n elif indx != self._houses_num -1:\n out_str = \"%s %d,\" %(out_str, self._houses[indx])\n else:\n out_str = \"%s %d]\" %(out_str, self._houses[indx])\n \n return out_str", "def __str__(self):\n\n result = \"n: \" + str(self.n) + \"\\n\"\n result += \"m: \" + str(self.m) + \"\\n\"\n result += \"ns: \" + str(self.ns) + \"\\n\"\n result += \"s0: \" + str(self.s0) + \"\\n\"\n result += \"goals: \" + str([self.goals[i] for i in range(self.ng)]) + \"\\n\"\n result += \"horizon: \" + str(self.horizon) + \"\\n\"\n result += \"gamma: \" + str(self.gamma) + \"\\n\\n\"\n\n result += \"S(s, a, s'):\\n%s\" % (str(np.array([self.S[i] \\\n for i in range(self.n * self.m * self.ns)]).reshape((self.n, self.m, self.ns)))) + \"\\n\\n\"\n\n result += \"T(s, a, s'):\\n%s\" % (str(np.array([self.T[i] \\\n for i in range(self.n * self.m * self.ns)]).reshape((self.n, self.m, self.ns)))) + \"\\n\\n\"\n\n result += \"R(s, a):\\n%s\" % (str(np.array([self.R[i] \\\n for i in range(self.n * self.m)]).reshape((self.n, self.m)))) + \"\\n\\n\"\n\n return result", "def __str__(self):\n s = \"\"\n for v in self.vectors:\n s += str(v) + \"\\n\"\n return s", "def __str__(self):\n largest_element = max(self)\n length = int(log10(largest_element)) + 1\n\n s = '\\n'.join([' '.join([\n f\"{elem:>{length}}\" for elem in row])\n for row in self.data])\n return s + '\\n'", "def as_str(self):\n return soho.arrayToString('\"%s\" [ ' % self.type_name, self.value, \" ]\")", "def __repr__ (self):\n return \"Matrix{}\".format(repr(self._m))", "def __str__(self):\n largest_element = max(self)\n 
integer_part_length = int(log10(largest_element)) + 1\n length = integer_part_length + self.str_decimal_places + 1\n\n s = '\\n'.join([' '.join([\n f\"{elem:{length}.{self.str_decimal_places}f}\"\n for elem in row])\n for row in self.data])\n return s + '\\n'", "def __str__(self):\n # string accumulator\n result = \"\\n\"\n\n for n in self.from_grid:\n for m in n:\n result += \" \" + m\n result += \"\\n\"\n\n return result", "def __str__(self):\n rv = '[ '\n n = len(self.mV)\n i = 0\n for f in self.mV:\n rv += self.mPrintSpec % f\n i += 1\n if (i < n):\n rv += ', '\n else:\n rv += ' '\n rv += ']'\n return rv", "def __str__(self):\n result = ''\n for row in range(self.getHeight()):\n for col in range(self.getWidth()):\n result += str(self.data[row][col]) + ' '\n result += '\\n'\n return result", "def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )", "def __str__(self):\n str = '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n for row in self.boards:\n for i in range(self.SIZE):\n str += '|'\n for board in row:\n for square in board.export_grid()[i]:\n str += square.value\n str += '|'\n str += '\\n'\n str += '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n return str", "def linecodes_read_cmatrix(self) -> str:\n return Bridge.var_array_function(self.dss_obj.LineCodesV, 4, None, '')", "def _repr_(self):\n return 'A {}x{} symbolic max plus matrix on {} variables'.format(\n self.dim(), self.dim(), self.num_vars())", "def __str__(self):\n result = \"\"\n for row in self._cells:\n result += \" \".join(map(str, row))\n result += \"\\n\"\n return result", "def intArrayToString(ivalues, prec=3, delem=\",\"):\n\tsvalues = list(map(lambda v : str(v), ivalues))\n\tdelem = \" \" if delem is None else delem\n\treturn delem.join(svalues)", "def __repr__(self):\n return '{!s}, {!s}'.format(self.j, self.m)", "def usd_format(self,number_array):\n return np.char.array(np.around(number_array,decimals=3),itemsize=5)", "def matrixRepresentation(self,decimals=8):\n temp = self.circuit.copy()\n temp.remove_final_measurements()\n \n simulator = Aer.get_backend('unitary_simulator')\n result = execute(temp, backend=simulator).result()\n unitary = result.get_unitary(decimals=decimals).tolist()\n for i in range(len(unitary)):\n for j in range(len(unitary[i])):\n if unitary[i][j]==0:\n unitary[i][j]=\"0\"\n else:\n string=str(unitary[i][j].real).replace(\".0\", \"\")\n string=\"\" if unitary[i][j].real==0 else string\n string+=self.numberFormat(unitary[i][j].imag,True)\n unitary[i][j]=string.lstrip(\"+\")\n return unitary", "def __str__(self):\r\n string_rep_of_grid=\"\"\r\n row=\"\"\r\n for dummy_j in range(self._height):\r\n for dummy_i in range(self._width):\r\n row=row+str(self._grid[dummy_j][dummy_i])+\" \" \r\n string_rep_of_grid=string_rep_of_grid+\"row number \"+str(dummy_j)+\": \"+row\r\n row=\"\"\r\n return string_rep_of_grid", "def matrixconverter(seqmatrix):\n\n\tdic = {0:\"A\",1:\"C\",2:\"G\",3:\"T\"} # dictionary of indexes of each 
nucleotide for matrices\n\ta = np.transpose(np.nonzero(np.transpose(seqmatrix))).tolist()\n\tseqstring = \"\"\n\tfor i in a:\n\t\tseqstring += dic[i[1]]\n\treturn seqstring", "def str_conn_matrix(self):\n\n st = 'Connection Matrix:\\n'\n for line in self.connection_matrix:\n st += line + \"= [ \"\n for j in range(len(self.connection_matrix[line])):\n st += str(self.connection_matrix[line][j]) + \" \"\n\n st += \"], \"\n\n return st", "def __str__(self):\n grid_str = \"\"\n for i in range(len(self.grid)):\n for j in range(len(self.grid[i])):\n grid_str = grid_str + self.grid[i][j] + '\\t'\n grid_str = grid_str.strip('\\t')\n grid_str = grid_str + '\\n'\n return grid_str", "def get_M_as_string(self):\n return '\\n'.join(['M({})={}'.format(p.name, p.M) for p in self.P])", "def __repr__(self):\n return \"[\" + \", \".join([str(member) for member in self.table]).rstrip(\",\") + \"]\"", "def __str__(self):\n origin = [len(u) for u in self.sequences]\n currentorigin = origin\n returnstring = \"The score for this alignment is: \" + str(self.retrievematrixelement(origin).score) + \"\\n\\n\"\n\n if self.retrievematrixelement(origin) == None:\n return \"The matrix has not been solved yet. Call the solve() method to solve the matrix.\"\n\n else:\n result = []\n resultstrings = [\"\"] * len(self.sequences)\n while currentorigin != [0] * len(self.sequences):\n result.insert(0, self.retrievematrixelement(currentorigin).coordinate)\n currentorigin = result[0]\n\n result.append(origin)\n\n for u in range(1, len(result)):\n origin = result[u - 1]\n destination = result[u]\n for v in range(len(resultstrings)):\n if origin[v] == destination[v]:\n resultstrings[v] += \".\"\n else:\n resultstrings[v] += self.sequences[v][destination[v] - 1]\n\n for value in resultstrings:\n returnstring += value + '\\n'\n\n return returnstring", "def matrix_string_ones(self, size, prefix = ''):\n if size == 1: # Not indexed\n return np.array2string(np.array(1.))\n if len(size) == 1:\n size.append(1)\n mat = np.ones(size)\n return np.array2string(mat, prefix = prefix, sign = ' ', separator = ', ')", "def __str__(self) -> str:\n result = \"[\"\n for i in range(len(self)):\n if i > 0:\n result += ', '\n result += str(self[i])\n result += ']'\n return result", "def int_repr(arr):\n return list(map(list2int, arr))", "def __repr__(self):\n class_name = self.__class__.__name__\n data = ',\\r\\n'.join(\n [' [' + ', '.join(list(map(str, row))) + ']'\n for row in self.data])\n return f'{class_name}({self.m}, {self.n}, [\\r\\n{data}\\r\\n])'", "def __repr__(self):\r\n\r\n # If the input is a single int/float (with no shape) return a 'scalar'\r\n # time-point:\r\n if self.shape == ():\r\n return \"%r %s\" % (int(self) / float(self._conversion_factor),\r\n self.time_unit)\r\n # Otherwise, return the TimeArray representation:\r\n else:\r\n return np.ndarray.__repr__(self / float(self._conversion_factor)\r\n )[:-1] + \", time_unit='%s')\" % self.time_unit", "def __str__(self):\n\t\tstring = \"\"\n\t\tfor i in self.board:\n\t\t\tfor j in i:\n\t\t\t\tstring += str(j)\n\t\t\tstring += \"\\n\"\n\t\treturn string", "def __str__(self):\n return str(self._cells)", "def __str__(self):\n return '\\n'.join(str(self._board[j]) for j in range(self._n))", "def __str__(self):\n return '[' + ', '.join([str(x) for x in self.elem]) + ']'", "def tostring(self, transformer=None):\n\t\tresult = ''\n\t\tif not transformer:\n\t\t\ttransformer = str\n\t\tfor start in range(0, len(self.data), self.dims[0]):\n\t\t\tfor c in 
self.data[start:start+self.dims[0]]:\n\t\t\t\tresult += transformer(c)\n\t\t\tresult += '\\n'\n\t\treturn result", "def latex(self):\n from nodepy.snp import printable\n sep = ' & '\n s= r'\\begin{align}' + '\\n'\n s+=r'\\begin{array}{c|'\n s+='c'*len(self)\n s+='}\\n'\n for i in range(len(self)):\n s+=printable(self.c[i]) + sep\n s += sep.join([printable(aij) for aij in self.A[i,:]])\n s+=r'\\\\' + '\\n'\n s+=r'\\hline' + '\\n'\n s += sep\n s += sep.join([printable(bj) for bj in self.b])\n s+=r'\\\\' + '\\n'\n if hasattr(self,'bhat'):\n s += sep\n s += sep.join([printable(bj) for bj in self.bhat])\n s += '\\n'\n s += r'\\end{array}' + '\\n'\n s += r'\\end{align}'\n s=s.replace('- -','')\n return s", "def toString(self):\n printme = \"\"\n for i in range (0,len(self.maze)):\n for j in self.maze[i]:\n printme = printme + j\n printme = printme + \"\\n\"\n return printme", "def __str__(self):\n grid_str = \"[\"\n for row in range(self._height):\n grid_str += \" \" + str(self._grid[row]) + \"\\n\"\n grid_str = grid_str[0:1] + grid_str[2:]\n grid_str = grid_str[:-1]\n grid_str += \"]\"\n return grid_str", "def convertToGrid(numberString):\r\n numList = [int(ch) for ch in numberString]\r\n numArr = np.reshape(np.array(numList),(9, 9))\r\n return numArr", "def as_str(self) -> str:\n return dumps(self.as_dict(), cls=NumpyEncoder)", "def _values_number(self) -> ndarray:\n if 'f' in self._data:\n arr_dtype = 'float64'\n else:\n arr_dtype = 'int64'\n\n col_num: int = 0\n for kind, arr in self._data.items():\n if kind in 'ifb':\n col_num += arr.shape[1]\n shape: Tuple[int, int] = (len(self), col_num)\n\n v: ndarray = np.empty(shape, dtype=arr_dtype, order='F')\n for i, (_, col_arr, dtype, _) in enumerate(self._col_info_iter(with_arr=True)):\n if dtype in 'ifb':\n v[:, i] = col_arr\n return v", "def __str__(self):\n ans = \"\"\n for row in range(self._height):\n ans += str(self._grid[row])\n ans += \"\\n\"\n return ans", "def __str__(self):\n ans = \"\"\n for row in range(self._height):\n ans += str(self._grid[row])\n ans += \"\\n\"\n return ans", "def __str__(self):\n ans = \"\"\n for row in range(self._height):\n ans += str(self._grid[row])\n ans += \"\\n\"\n return ans", "def __str__(self):\n ans = \"\"\n for row in range(self._height):\n ans += str(self._grid[row])\n ans += \"\\n\"\n return ans", "def m_array(self, value):\n return '<array id=\"%s\" typecode=\"%s\" encoding=\"base64\">%s</array>' % \\\n (self.register(value), value.typecode, value.tostring().encode('base64'))", "def writeMatrix(self):\n\t\tpass", "def __str__(self):\n [r,c],f = self.D, self.F\n lmax = len(str(max(iter(self)))) + 1\n s = '\\n'.join( (' '.join('{0:{l}G}'.format(f(i,j),l=lmax) if isinstance(f(i,j), int) or isinstance(f(i,j), float) else str(f(i,j)) for j in range(c))) for i in range(r))\n return s", "def get_numerical_repr(self):\r\n return \"%s\\t%s\\t%.2f\\n\" % (self.trf_period,\r\n self.trf_array_length,\r\n self.trf_array_gc)", "def get_qtable_str(self):\n output = \"[\\n\"\n for row in self.qtable:\n output += \"\\t\" + str([round(x,2) for x in row]) + \",\\n\"\n output += \"]\\n\"\n\n return output", "def __str__(self):\n ans = \"\"\n for row in range(self._grid_height):\n ans += str(self._cells[row])\n ans += \"\\n\"\n return ans", "def __str__(self):\r\n ans = \"\"\r\n for row in range(self._height):\r\n ans += str(self._grid[row])\r\n ans += \"\\n\"\r\n return ans", "def __str__(self):\r\n ans = \"\"\r\n for row in range(self._height):\r\n ans += str(self._grid[row])\r\n ans += \"\\n\"\r\n return ans", "def 
__str__(self):\r\n ans = \"\"\r\n for row in range(self._height):\r\n ans += str(self._grid[row])\r\n ans += \"\\n\"\r\n return ans", "def snapshot(self):\n text = \"\"\n text += \"{}:\\n{}\\n\".format('chi', np.array2string(self.chi))\n return text", "def __repr__(self):\n res = ''\n for i in range(1, self.getNumRows() + 1):\n for j in range(1, self.getNumCols() + 1):\n res += repr(self.getItem(i, j))\n res += ' ' if j < self.getNumCols() else '\\n'\n return res" ]
[ "0.7381142", "0.7210961", "0.7170365", "0.7031644", "0.6882609", "0.68663853", "0.6862946", "0.6755842", "0.67423487", "0.66417027", "0.65829957", "0.6536184", "0.6523809", "0.6509771", "0.64562243", "0.6420926", "0.64129996", "0.638619", "0.6340786", "0.63212585", "0.626529", "0.62270266", "0.62068915", "0.62015605", "0.61953783", "0.6195149", "0.6184993", "0.61673695", "0.61639726", "0.6157629", "0.6149817", "0.61439705", "0.613813", "0.61302775", "0.6125756", "0.6119411", "0.6110248", "0.6096732", "0.60947746", "0.608849", "0.6057007", "0.60521024", "0.60296863", "0.60165095", "0.60049677", "0.60013586", "0.5986048", "0.59776247", "0.59768337", "0.5973302", "0.59704155", "0.5966224", "0.59501666", "0.59438854", "0.5910919", "0.59107697", "0.58987355", "0.5896482", "0.58820856", "0.58811265", "0.58658594", "0.58345765", "0.5825585", "0.5822985", "0.5813328", "0.5796103", "0.57948303", "0.57893664", "0.57852244", "0.5783274", "0.57766527", "0.5774329", "0.5756809", "0.57488453", "0.5747212", "0.57365596", "0.5728784", "0.5717142", "0.5713061", "0.57079023", "0.570682", "0.5705544", "0.5704251", "0.5697125", "0.5695827", "0.5692535", "0.5692535", "0.5692535", "0.5692535", "0.5692474", "0.5692325", "0.5690998", "0.56896937", "0.56878316", "0.56816363", "0.56632084", "0.56632084", "0.56632084", "0.56618047", "0.5653826" ]
0.66033196
10
Add two matrices together
def __add__(self, otherMatrix):
    sameRows = (len(self.array) == len(otherMatrix.array))
    sameCols = (len(self.array[0]) == len(otherMatrix.array[0]))
    if not (sameCols and sameRows):
        raise ArithmeticError
    X = len(self.array)
    Y = len(self.array[0])
    retArray = [[0 for col in range(Y)] for row in range(X)]
    for row in range(X):
        for col in range(Y):
            retArray[row][col] = otherMatrix.array[row][col] + self.array[row][col]
    return matrix(retArray)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __add__(self, other):\r\n if isinstance(other, mat4):\r\n return mat4(map(lambda x,y: x+y, self.mlist, other.mlist))\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"", "def matAdd(a, b):\n shape=matShape(a)\n return [[matGet(a,x,y)+matGet(b,x,y) for y in range(shape[1])] \\\n for x in range(shape[0])]", "def __add__(self,other):\n if self.h != other.h or self.w != other.w:\n raise(ValueError, \"Matrices can only be added if the dimensions are the same\") \n # \n # TODO - your code here\n #\n result = [];\n for i in range(self.h):\n result.append([a+b for a,b in zip(self.g[i],other.g[i])]);\n \n return Matrix(result);", "def __add__(self, other):\n if (self.m != other.m) or (self.n != other.n):\n raise TypeError(\"Dimensions of matrices does not match\")\n tmp = [[0 for _ in xrange(self.m)] for _ in xrange(self.n)]\n for i in xrange(self.n):\n for j in xrange(self.m):\n tmp[i][j] = self.values[i][j] + other.values[i][j]\n res = []\n for i in tmp:\n res += i\n return simplematrix(self.m, self.n, res)", "def add_matrices(m1, m2):\n import numpy as np\n shape = np.shape(m1)\n sum_matrix_l = []\n for row, column in enumerate(m1):\n sum_matrix_l.append(m1[row,column]+m2[row,column])\n sum_matrix = np.array(sum_matrix_l)\n return sum_matrix.reshape(shape)", "def __add__ (self, other):\n if self.dimensions == other.dimensions:\n result = []\n for row in zip(self._m, other._m):\n result.append(sum(cell) for cell in zip(*row))\n return Matrix(*result)\n else:\n raise ValueError(\"Matrices must have the same dimensions.\")", "def __add__(self, other):\n if not isinstance(other, Matrix) or not self.equal_size(other):\n raise ValueError(\"Can only add two Matrix objects with same dimensions\")\n\n vectors = list()\n for i in range(self.m):\n v1 = self.vectors[i]\n v2 = other.vectors[i]\n vectors.insert(i, v1 + v2)\n return Matrix(vectors)", "def __add__(self, other):\n if self.rows != other.rows or self.cols != other.cols:\n raise IndexError(\"Size of matrices are not equal: (%i, %i) != (%i, %i)\"%\n (self.rows, self.cols, other.rows, other.cols))\n\n newmat = make_matrix(self.rows, self.cols)\n for i in range(newmat.rows):\n for j in range(newmat.cols):\n newmat[i, j] = self[i, j] + other[i, j]\n return newmat", "def __add__(self,other):\n if self.h != other.h or self.w != other.w:\n raise (ValueError,\n \"Matrices can only be added if the dimensions are the same\")\n #\n # TODO - your code here\n #\n my_add = zeroes(self.h, self.w)\n for i in range(self.h):\n for j in range(self.w):\n my_add.g[i][j] = self.g[i][j] + other.g[i][j]\n\n return my_add", "def __add__(self,other):\n if self.h != other.h or self.w != other.w:\n raise(ValueError, \"Matrices can only be added if the dimensions are the same\")\n #\n # TODO - your code here\n #\n matrix_sum = []\n for i in range(self.h):\n row = []\n for j in range(self.w):\n row.append(self.g[i][j] + other.g[i][j])\n matrix_sum.append(row)\n return Matrix(matrix_sum)\n # TODO - your code here", "def matrix_add():", "def add_matrices(m1, m2): \n output = []\n \n for index in range(len(m1)):\n row_1 = m1[index]\n row_2 = m2[index]\n new_row = []\n for index2 in range(len(row_1)):\n sum = row_1[index2] + row_2[index2]\n new_row.append(sum)\n output.append(new_row)\n return output", "def add_matrices(m1, m2):\n\t\n\ttemp = []\n\tfor i in range(len(m1)):\n\t\te = []\n\t\tfor j in range(len(m1[0])):\n\t\t\te.append(m1[i][j]+m2[i][j])\n\t\ttemp.append(e)\n\treturn temp", "def add(self,mat1,mat2):\n if(np.shape(mat1)==np.shape(mat2)):\n result 
= [[mat1[i][j] + mat2[i][j] for j in range(len(mat1[0]))] for i in range(len(mat1))]\n self.out = result\n return self.out\n elif(isinstance(mat2,int)==True):\n result = [[mat1[i][j] + mat2 for j in range(len(mat1[0]))] for i in range(len(mat1))]\n self.out = result\n return self.out\n else:\n print('wrong format')", "def matrixAddition(self, a, b):\n\t\n\t\tnumRows = len(a)\n\t\tnumCols = len(a[0])\n\t\n\t\treturn [[a[j][i] + b[j][i] for i in range(numCols)] for j in range(numRows)]", "def add_matrices(m1, m2):\n\t\n\t\n\treturn [[a+b for a, b in izip(m1, m2)] for m1, m2 in izip(m1, m2)]", "def __add__(self, oth):\n\t\tif not isinstance(oth, Matrix):\n\t\t\toth = Matrix(oth)\n\t\treturn self._add(oth)", "def add_matrices(x, y):\n return [[x[i][j] + y[i][j] for j in range(len(x[0]))] for i in range(len(x))]", "def sum(m1, m2):\n return np.matrix('')", "def add_matrices(mat1, mat2):\n if mat_shape(mat1) != mat_shape(mat2):\n return None\n if type(mat1[0]) == int:\n return [mat1[i] + mat2[i] for i in range(len(mat1))]\n else:\n return [add_matrices(mat1[i], mat2[i]) for i in range(len(mat1))]", "def __radd__(self, oth):\n\t\toth_m = oth\n\t\tif not isinstance(oth_m, Matrix):\n\t\t\toth_m = Matrix(oth_m)\n\t\tres_m = oth_m._add(self)\n\t\tif isinstance(oth,Matrix):\n\t\t\treturn res_m\n\t\telse:\n\t\t\treturn type(oth)(res_m._unnest())", "def __add__(self, other):\n if not issubclass(type(other), Matrix):\n raise TypeError(type(other))\n\n if self.rows != other.rows or self.columns != other.columns:\n raise ValueError(\"Sizes should be equivalent\")\n\n result = [ x + y for x, y in zip(self.data, other.data)]\n return Matrix(self.rows, self.columns, data = result)", "def basic_add(mv1, mv2):\n obj = expand(mv1.obj + mv2.obj)\n return MV(obj)", "def __add__(self, m):\n\n nv=Matrice()\n if self.__mm_type(m):\n ls=len(self)\n nv.generate(ls,self.desc())\n for i in self.desc():\n for j in range(len(self)):\n nv.g_val(self.val(i,j)+m.val(i,j),i,j)\n return nv", "def __add__(self, other):\n # other is scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Matrix([[p + other for p in row] for row in self.data])\n # other is Matrix\n elif isinstance(other, Matrix):\n if self.n_cols == other.n_cols and self.n_rows == other.n_rows:\n return Matrix([[self.data[row][col] + other.data[row][col]\n for col in range(self.n_cols)] for row in range(self.n_rows)])\n else:\n raise ValueError('Dimensions of matrices do not match')\n else:\n raise TypeError('Matrix can only be divided by a scalar')", "def __add__(self, other):\n if not isinstance(other, Matrix):\n return NotImplemented\n\n if self.num_cols != other.num_cols:\n raise ValueError((\"A matrix with %d columns cannot be added to \"\n \"a matrix with %d columns\") % (self.num_cols, other.num_cols))\n\n if self.num_rows != other.num_rows:\n raise ValueError((\"A matrix with %d rows cannot be added to \"\n \"a matrix with %d rows\") % (self.num_cols, other.num_cols))\n\n new_mat = self.__class__(self.num_rows, other.num_rows)\n for i in xrange(self.num_rows):\n for j in xrange(self.num_cols):\n new_mat[i, j] = self[i, j] + other[i, j]\n\n return new_mat", "def addition(self):\n try:\n addition = self.matrix1 + self.matrix2\n except Exception as e:\n return \"Error: {}\".format(e)\n\n return addition", "def mat_addition(A, B):\n # Return False if shape doesn't match\n if np.shape(A) != np.shape(B):\n return False\n\n #create empty array - shape n, q\n result = np.zeros(np.shape(A))\n #If matrix do this\n if 
A.ndim == 2:\n #update each cell of result array by adding correct cells\n for r in range(np.shape(A)[0]):\n for c in range(np.shape(A)[1]):\n result[r,c] = A[r,c] + B[r,c]\n return result\n #If vector do this\n elif A.ndim == 1:\n for i in range(len(A)):\n result[i] = A[i] + B[i]\n return result\n #Else return False\n else:\n return False", "def __add__(self, other_matrix):\n # flag - length matrix1 == length matrix2\n hasError = False\n # check lengths\n if len(self.data_list) != len(other_matrix.data_list):\n hasError = True\n else:\n for i in range(len(self.data_list)):\n if len(self.data_list[i]) != len(other_matrix.data_list[i]):\n hasError = True\n if hasError:\n print(f'Matrix length are not equals')\n return 0\n\n new_data_list = list()\n # adding each items\n for i in range(len(self.data_list)):\n new_inner_data_list = list()\n for j in range(len(data_list[i])):\n new_inner_data_list.append(self.data_list[i][j] + other_matrix.data_list[i][j])\n new_data_list.append(new_inner_data_list)\n\n return Matrix(new_data_list)", "def add(self, B: 'Matrix') -> 'Matrix':\n assert self.shape() == B.shape(), f\"For addition, matrices must have same shape. These are {self.shape()} and {B.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n # Remember, you need to create a new matrix to put the results\n C = Matrix.zeros(B.shape())\n for i in range(B.shape()[0]):\n for j in range(B.shape()[1]):\n C.mat[i][j] = self.mat[i][j]+B.mat[i][j]\n return C\n return Matrix([[\"Not yet written\"]]) # remove this when you add your code.\n # -------------------------------------------------------", "def matrix_add(A,B):\n\n\tif len(A) != len(B) or len(A[0]) != len(B[0]):\n\t\tprint('A and B are of different dimensions')\n\t\treturn\n\tsum = []\n\tm = len(A)\n\tn = len(A[0])\n\tfor i in range(m):\n\t\tsum.append([])\n\t\tfor j in range(n):\n\t\t\tentry = A[i][j] + B[i][j]\n\t\t\tsum[i].append(entry)\n\treturn sum", "def __add__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x+other.x, self.y+other.y, self.z+other.z, self.w+other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"", "def __add__(self, obj):\n if not (type(self) == type(obj)):\n return NotImplemented\n if not (self.size == obj.size):\n raise ValueError(\"Matrices must be the same size for '+'\")\n returnvalue = Matrix()\n for i in range(self._height):\n currentRow = list()\n for j in range(self._width):\n currentRow.append(self._value[i][j] + obj.value[i][j])\n returnvalue.addRow(*currentRow)\n return returnvalue", "def __iadd__(self, other):\r\n if isinstance(other, vec4):\r\n self.x+=other.x\r\n self.y+=other.y\r\n self.z+=other.z\r\n self.w+=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for +=\"", "def add_matrices2D(mat1, mat2):\n\n if matrix_shape(mat1) != matrix_shape(mat2):\n return None\n\n range_ax0 = range(len(mat1)) # range of axis 0\n range_ax1 = range(len(mat1[0])) # range of axis 1\n\n return [[mat1[i][j] + mat2[i][j] for j in range_ax1] for i in range_ax0]", "def __add__(self, other):\n\n if other == 0:\n return self\n else:\n L = self.matrix.copy()\n L.shift(1, other.matrix)\n return _PysparseMatrix(matrix=L)", "def add_matrices(mat1, mat2):\n\n new_matrix = []\n\n if (len(mat1) != len(mat2)):\n return None\n\n if (type(mat1) == list and type(mat1[0]) == list):\n for i in range(len(mat1)):\n value = add_matrices(mat1[i], mat2[i])\n if value is not None:\n new_matrix.append(value)\n else:\n return None\n 
else:\n for i in range(len(mat1)):\n new_matrix.append(mat1[i] + mat2[i])\n\n return new_matrix", "def _addMats(X1,X2):\n _checkSize(X1,X2)\n return [ _addVectors(X1[i],X2[i]) for i in range(len(X1))]", "def __add__(self, B):\n m, n = self.shape\n try:\n k, r = B.shape\n except AttributeError: # treat B as constant\n c = mpfr(B)\n sum_ = dict()\n for i in range(m):\n for j in range(n):\n sum_[i, j] = self[i, j] + c\n return MPMatrix((m, n), sum_)\n\n assert (m == k\n and n == r), (\"Cannot add shapes ({}, {}) and ({}, {})\".format(\n m, n, k, r))\n sum_ = dict()\n for i in range(m):\n for j in range(n):\n sum_[i, j] = self[i, j] + B[i, j]\n return MPMatrix((m, n), sum_)", "def __iadd__(self, m):\n if self.__mm_type(m):\n ls=len(self)\n for i in self.desc():\n for j in range(ls):\n self.g_val(self.val(i,j)+m.val(i,j),i,j)\n return self", "def matrix_addition(A, B):\n # Section 1: Ensure dimensions are valid for matrix addition\n rowsA = len(A); colsA = len(A[0])\n rowsB = len(B); colsB = len(B[0])\n if rowsA != rowsB or colsA != colsB:\n raise ArithmeticError('Matrices are NOT the same size.')\n\n # Section 2: Create a new matrix for the matrix sum\n C = zeros_matrix(rowsA, colsB)\n\n # Section 3: Perform element by element sum\n for i in range(rowsA):\n for j in range(colsB):\n C[i][j] = A[i][j] + B[i][j]\n\n return C", "def __add__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other)\n if not mv:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj + other\n newValue = self.value + other.value\n\n return self._newMV(newValue)", "def matrixSum( self, matrix0, matrix1 ):\r\n result = {}\r\n keys = sorted( set( matrix0.keys() ) )\r\n count = range( len( matrix0.keys() ) )\r\n \r\n for key in keys:\r\n result[ key ] = []\r\n for i in count:\r\n soma = matrix0[ key ][ i ] + matrix1[ key ][ i ]\r\n result[ key ].append( soma )\r\n \r\n return result", "def add(A, B):\n A._check('+', B, A.shape, B.shape)\n return A.from_rep(A.rep.add(B.rep))", "def add_matrices2D(mat1, mat2):\n if len(mat1) != len(mat2):\n return None\n if len(mat1[0]) != len(mat2[0]):\n return None\n return [[ele1 + ele2 for ele1, ele2 in zip(row1, row2)]\n for row1, row2 in zip(mat1, mat2)]", "def matrix_mult(m1, m2):\n pass", "def madd(self, matrix):\n try:\n result_matrix = [[0 for col in range(len(matrix[0]))] for row in range(len(matrix))]\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n result_matrix[i][j] = self.matrix[i][j] + matrix[i][j]\n self.matrix = result_matrix\n except IndexError:\n pass\n pass", "def __add__(self, other):\n\t\tif len(self) != len(other):\n\t\t\traise ValueError('dimensions must agree')\n\t\tresult = Vector(len(self))\n\t\tfor j in range(len(self)):\n\t\t\tresult[j] = self[j] + other[j]\n\t\treturn result", "def __add__(self, other):\n if len( self) != len(other):\n raise ValueError('Dimensions must match.')\n result = Vector(len(self))\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def Concat(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Concat(*args, **kwargs)", "def __add__(self, other):\n if other == 0:\n return self\n\n pmf = Pmf()\n for v1, p1 in self.items():\n for v2, p2 in other.items():\n pmf[v1 + v2] += p1 * p2\n return pmf", "def __add__(self, other):\n return Vec2d(self.v[0] + other[0], self.v[1] + other[1])", "def matrix_add(x,y) -> [[]]:\n X = x\n\n Y = y\n\n if len(X) == len(Y) and len(X[0]) == len(Y[0]):\n return [[X[a][b] + Y[a][b] for b in range(len(X[0]))]\n for a in range(len(X))]", 
"def __add__(self, other):\n var_set = set(self.variables.keys()) | set(other.variables.keys())\n result = {}\n for v in var_set:\n a, b = self.variables.get(v, 0), other.variables.get(v, 0)\n a, b = self._broadcast(a, b)\n result[v] = a + b\n return MultivariateDerivative(result)", "def sum_matrixes(A, B):\n\n\tD = np.zeros((3,3))\n\tD[0][0] = A[0][0]+ B[0][0]\n\tD[0][1] = A[0][1]+ B[0][1]\n\tD[0][2] = A[0][2]+ B[0][2]\n\tD[1][0] = A[1][0]+ B[1][0]\n\tD[1][1] = A[1][1]+ B[1][1]\n\tD[1][2] = A[1][2]+ B[1][2]\n\tD[2][0] = A[2][0]+ B[2][0]\n\tD[2][1] = A[2][1]+ B[2][1]\n\tD[2][2] = A[2][2]+ B[2][2]\n\treturn D", "def matrixAddition(firstMatrix, secondMatrix):\n if len(firstMatrix) == len(secondMatrix): # Checks whether the matrices can be added or not\n print(firstMatrix)\n print(secondMatrix)\n additionMatrix = [] # Vector that will contain the full matrix\n for i in range(len(firstMatrix)): # Will through each row in the matrix\n matrix = [] # Vector that will get all the rows in the matrix\n for j in range(len(secondMatrix[0])): # Will go through each column\n sumNum = firstMatrix[i][j] + secondMatrix[i][j] # Will add each individual cell\n matrix.append(sumNum)\n print(matrix)\n additionMatrix.append(matrix)\n print(\"The sum of the two matrices is: \" + str(additionMatrix))\n else:\n print(\"This operation cannot be done because the dimensions of the matrices are not the same. \"\n \"Try again :)\")", "def sum(self, other):\n if is_matrix(other):\n return self._sum_matrix(other, 1)\n elif mathutil.is_scalar(other):\n return self._sum_scalar(other, 1)\n else:\n self._logger.error(\n \"'Matrix' instance, int, float or complex expected, not '{}'\".format(type(other)))\n raise TypeError(\n \"'Matrix' instance, int, float or complex expected, not '{}'\".format(type(other)))", "def colo_addmm(input_tensor: GeneralTensor,\n mat1: ColoTensor,\n mat2: ColoTensor,\n beta: Number = 1,\n alpha: Number = 1,\n **kargs) -> ColoTensor:\n # At least one of the tensor should be ColoTensor\n assert isinstance(mat2, ColoTensor)\n input_tensor = convert_to_colo_tensor(input_tensor, mat2.get_process_group())\n mat1 = convert_to_colo_tensor(mat1, mat2.get_process_group())\n\n # Add communication logic before and after linear call.\n ret_tensor = None\n if not mat2.has_compute_spec(): # No Model Parallel Applied\n assert mat2.is_replicate(), 'Invalid mat2 spec for native addmm op'\n assert input_tensor.is_replicate(), 'Invalid input spec for native addmm op'\n ret_tensor = ColoTensor.from_torch_tensor(tensor=torch.addmm(input_tensor,\n mat1,\n mat2,\n beta=beta,\n alpha=alpha,\n **kargs),\n spec=ColoTensorSpec(mat2.get_process_group()))\n elif mat2.has_compute_pattern(ComputePattern.TP1D): # Single Model Parallel Applied\n if mat2.is_shard_1drow() and input_tensor.is_replicate():\n mode = 'row'\n elif mat2.is_shard_1dcol() and (input_tensor.is_shard_1dcol() or input_tensor.is_shard_1drow()):\n mode = 'col'\n else:\n raise NotImplementedError\n ret_tensor = colo_addmm_1d(mode, input_tensor, mat1, mat2, beta, alpha)\n else:\n raise NotImplementedError\n\n return ret_tensor", "def __mul__(self, other):\n if self.n != other.m:\n raise TypeError(\"Illegal dimensions for mul operator\")\n tmp = [[0 for _ in xrange(self.n)] for _ in xrange(other.m)]\n for i in xrange(self.n):\n for j in xrange(other.m):\n for k in xrange(other.n):\n tmp[i][j] += self.values[i][k] * other.values[k][j]\n res = []\n for i in tmp:\n res += i\n return simplematrix(self.n, other.m, res)", "def __add__(self, obj):\n if isinstance(obj, 
Matrix):\n if self.m != obj.m or self.n != obj.n:\n raise exc.ComformabilityError(\n \"matrices must have the same dimensions\")\n if type(self) is not type(obj):\n raise TypeError(\"matrices must be the same type\")\n data = [[self[i, j] + obj[i, j]\n for j in range(self.n)]\n for i in range(self.m)]\n elif Matrix.is_numeric(obj):\n self._validate_scalar(obj)\n data = [[self[i, j] + obj\n for j in range(self.n)]\n for i in range(self.m)]\n else:\n raise TypeError(\n \"cannot add object of type \" + type(obj).__name__ +\n \" to matrix\")\n return self.__class__(self.m, self.n, data)", "def __add__(self,other):\n\t\treal = self.realPart + other.realPart\n\t\timaginary = self.imaginaryPart + other.imaginaryPart\n\n\t\t#create and return new complexnumber\n\t\treturn real,imaginary", "def __radd__(self, other):\n return asarray(add(numpy.asarray(other), self))", "def __mul__(self, other):\n # \n # TODO - your code here\n #\n \n result = [];\n row_result = [];\n product = 0;\n \n if(self.w != other.h):\n raise(ValueError, \"Matrices can not multiply for their dimesion doesn't match\"); \n \n for row in self.g:\n row_result = [];\n for j in range(other.w):\n product = dot_product(row,other.get_column(j));\n row_result.append(product);\n result.append(row_result);\n \n return Matrix(result);", "def __mul__(self, other):\r\n T = type(other)\r\n # mat4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x*other, self.mlist))\r\n # mat4*vec3\r\n if isinstance(other, _vec3):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n w = float(m41*other.x + m42*other.y + m43*other.z + m44)\r\n return _vec3(m11*other.x + m12*other.y + m13*other.z + m14, \r\n m21*other.x + m22*other.y + m23*other.z + m24, \r\n m31*other.x + m32*other.y + m33*other.z + m34)/w\r\n # mat4*vec4\r\n if isinstance(other, _vec4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n return _vec4(m11*other.x + m12*other.y + m13*other.z + m14*other.w, \r\n m21*other.x + m22*other.y + m23*other.z + m24*other.w, \r\n m31*other.x + m32*other.y + m33*other.z + m34*other.w,\r\n m41*other.x + m42*other.y + m43*other.z + m44*other.w)\r\n # mat4*mat4\r\n if isinstance(other, mat4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n n11,n12,n13,n14,n21,n22,n23,n24,n31,n32,n33,n34,n41,n42,n43,n44 = other.mlist\r\n return mat4( m11*n11+m12*n21+m13*n31+m14*n41,\r\n m11*n12+m12*n22+m13*n32+m14*n42,\r\n m11*n13+m12*n23+m13*n33+m14*n43,\r\n m11*n14+m12*n24+m13*n34+m14*n44,\r\n\r\n m21*n11+m22*n21+m23*n31+m24*n41,\r\n m21*n12+m22*n22+m23*n32+m24*n42,\r\n m21*n13+m22*n23+m23*n33+m24*n43,\r\n m21*n14+m22*n24+m23*n34+m24*n44,\r\n\r\n m31*n11+m32*n21+m33*n31+m34*n41,\r\n m31*n12+m32*n22+m33*n32+m34*n42,\r\n m31*n13+m32*n23+m33*n33+m34*n43,\r\n m31*n14+m32*n24+m33*n34+m34*n44,\r\n\r\n m41*n11+m42*n21+m43*n31+m44*n41,\r\n m41*n12+m42*n22+m43*n32+m44*n42,\r\n m41*n13+m42*n23+m43*n33+m44*n43,\r\n m41*n14+m42*n24+m43*n34+m44*n44)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"", "def addmul(a,b):\n return a*b+a*b", "def add(a, b):\n return np.array([x + y for x, y in zip(a, b)])", "def __add__(self, other):\n h, w = self.size\n col_indices = self.col_indices + [w + i for i in other.col_indices]\n row_indices = self.row_indices + other.row_indices\n values = self.values + other.values\n oh, ow = other.size\n size = [max(h, oh), w + ow]\n return Sparse(size, 
row_indices, col_indices, values)", "def __iadd__(self, other):\n #print \"adding \", other, \" to \", self\n # need to do shallow copy, or otherwise smth like \"cm += cm\"\n # would loop forever and exhaust memory eventually\n othersets = copy.copy(other.__sets)\n for set in othersets:\n self.add(*set)#[0], set[1])\n return self", "def __matmul__(self, other):\n return F.MatMul.apply(self, other)", "def __add__(self, other):\n cls = self.__class__\n return cls(self.x+other.x, self.y+other.y, self.z+other.z)", "def calculate_matmul(mat_a, mat_b):\n assert mat_a.shape[-2] == 1 and mat_b.shape[-1] == 1\n return tf.reduce_sum(tf.squeeze(mat_a, -2) * tf.squeeze(mat_b, -1), axis=2, keepdims=True)", "def __add__(self, other):\n return Vector([c1 + c2 for (c1, c2) in zip(self.components, other.components)])", "def __add__(self,other):\n return Vector(self.x+other.x,self.y+other.y,self.z+other.z)", "def logical_matadd(mat1, mat2):\n S1 = mat1.shape\n S2 = mat2.shape\n mat = np.zeros(S1, dtype=int)\n if S1 != S2:\n raise HyperNetXError(\"logical_matadd called for matrices with different dimensions\")\n if len(S1) == 1:\n for idx in range(S1[0]):\n mat[idx] = 1 * np.logical_xor(mat1[idx], mat2[idx])\n else:\n for idx in range(S1[0]):\n for jdx in range(S1[1]):\n mat[idx, jdx] = 1 * np.logical_xor(mat1[idx, jdx], mat2[idx, jdx])\n return mat", "def __add__(self, other):\n if len(self) != len(other):\n raise ValueError('As dimensões devem ser iguais')\n\n result = Vector(len(self)) # inicia um novo array do tamanho do próprio\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def __mul__(self, other):\n if isinstance(other, (int, float)):\n newmat = make_matrix(self.rows, self.cols)\n for i in range(newmat.rows):\n for j in range(newmat.cols):\n newmat[i, j] = self[i, j] * other\n elif isinstance(other, Matrix):\n if self.cols != other.rows:\n raise IndexError(\"Row/column mismatch: (%i, %i) x (%i, %i)\"%\n (self.rows, self.cols, other.rows, other.cols))\n\n newmat = make_matrix(self.rows, other.cols)\n\n for i in range(newmat.rows):\n for j in range(newmat.cols):\n for k in range(self.cols):\n newmat[i, j] += self[i, k] * other[k, j]\n return newmat", "def __iadd__(self, other):\n self.components = [c1 + c2 for (c1, c2) in zip(self, other)]\n return self", "def __add__(self, other):\n if (len(self.arg) < len(other.arg)):\n summ = Polynomial(other.arg)\n i = len(self.arg) - 1\n for x in self.arg:\n summ.arg[i] = self.arg[i] + summ.arg[i]\n i = i - 1\n else:\n summ = Polynomial(self.arg)\n i = len(other.arg) - 1\n for x in other.arg:\n summ.arg[i] = other.arg[i] + summ.arg[i]\n i = i - 1\n return summ", "def __iadd__(self, other):\n self.x += other.x\n self.y += other.y\n return self", "def add(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return FreeCAD.Vector(first.x+other.x, first.y+other.y, first.z+other.z)", "def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj", "def __iadd__(self, other):\n if not isinstance(other, KGCorrelation):\n raise TypeError(\"Can only add another KGCorrelation object\")\n if not (self._nbins == other._nbins and\n self.min_sep == other.min_sep and\n self.max_sep == other.max_sep):\n raise ValueError(\"KGCorrelation to be added is not compatible with this one.\")\n\n self._set_metric(other.metric, other.coords, other.coords)\n self.xi.ravel()[:] += other.xi.ravel()[:]\n self.xi_im.ravel()[:] += other.xi_im.ravel()[:]\n 
self.meanr.ravel()[:] += other.meanr.ravel()[:]\n self.meanlogr.ravel()[:] += other.meanlogr.ravel()[:]\n self.weight.ravel()[:] += other.weight.ravel()[:]\n self.npairs.ravel()[:] += other.npairs.ravel()[:]\n return self", "def matmul(a, b):\n raise NotImplementedError", "def __mul__(self, other):\n if not isinstance(other, Matrix):\n return NotImplemented\n\n if self.num_cols != other.num_rows:\n raise ValueError(\"self.num_cols (%d) != other.num_rows (%d)\" % (self.num_cols, other.num_cols))\n\n new_mat = Matrix(self.num_rows, other.num_cols)\n\n # iterate through rows of self\n for i in range(self.num_rows):\n # iterate through columns of other\n for j in range(other.num_cols):\n # iterate through rows of other\n for k in range(other.num_rows):\n new_mat[i, j] += self[i, k] * other[k, j]\n\n return new_mat", "def __add__(self, other):\n return asarray(add(self, other))", "def __add__(self, other):\n return add_mps(self, other)", "def add(q_1: Q, q_2: Q) -> Q:\n\n q_1.check_representations(q_2)\n\n add_q_type = f\"{q_1.q_type}+{q_2.q_type}\"\n\n t_1, x_1, y_1, z_1 = q_1.t, q_1.x, q_1.y, q_1.z\n t_2, x_2, y_2, z_2 = q_2.t, q_2.x, q_2.y, q_2.z\n\n add_q = Q(q_type=add_q_type, representation=q_1.representation)\n add_q.t = t_1 + t_2\n add_q.x = x_1 + x_2\n add_q.y = y_1 + y_2\n add_q.z = z_1 + z_2\n\n return add_q", "def concatenate(self, other):\n assert self.same_col_labels_as(other)\n newlabels = list(self.row_labels) + list(other.row_labels)\n return DenseMatrix(np.concatenate([self, other]), newlabels, self.col_labels)", "def matMul(a, b):\n sa=matShape(a)\n sb=matShape(b)\n if sa[1]!=sb[0]: raise ValueError\n ret=matZeros((sa[0],sb[1]))\n for i in range(sa[0]):\n for j in range(sb[1]):\n val=0.0\n for k in range(sa[1]):\n val+=matGet(a,i,k)*matGet(b,k,j)\n matSet(ret,i,j,val)\n return ret", "def __add__(self, other):\n pmf = Pmf()\n for key1, prob1 in self.items():\n for key2, prob2 in other.items():\n pmf[key1 + key2] += prob1 * prob2\n return pmf", "def add_(self, other: 'ModelParameters'):\n for idx in range(len(self)):\n self.parameters[idx] += other[idx]", "def __iadd__(self, other):\n self.components = [c1 + c2 for (c1, c2) in zip(self.components, other.components)]\n return self", "def ADD (self, n1, n2):", "def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)", "def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n\n matrix_sum = []\n for i, j in zip(arr1, arr2):\n matrix_sum.append(i + j)\n return matrix_sum", "def __add__(self, other):\n new_measure = Measure()\n settings = [\"raw\", \"fil\"]\n\n for rf in settings:\n new_measure.hit1[rf] = (self.hit1[rf] + other.hit1[rf])\n new_measure.hit3[rf] = (self.hit3[rf] + other.hit3[rf])\n new_measure.hit10[rf] = (self.hit10[rf] + other.hit10[rf])\n new_measure.mrr[rf] = (self.mrr[rf] + other.mrr[rf])\n new_measure.mr[rf] = (self.mr[rf] + other.mr[rf])\n return new_measure", 
"def __radd__(self, other) -> 'Tensor':\n return _add(ensure_tensor(other), self)", "def __add__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Add.apply(self, other)", "def _generic_canonical_add(mi1, c1, mi2, c2):\r\n\r\n len1, dim1 = mi1.shape\r\n len2, dim2 = mi2.shape # assume m1 and m2 are same dimension -> expand_dim of the mi with smaller dim\r\n\r\n mi1r = mi1.reshape(len1, 1,dim1)\r\n mi2r = mi2.reshape(1, len2,dim1)\r\n\r\n add_cond = np.equal(mi1r, mi2r).all(axis=2, keepdims=False) # where are mi1 and mi2 equal along the dim?\r\n add_cond1 = add_cond.any(axis=1) # added condition for m1,c1\r\n\r\n not_add_cond1 = np.logical_not(add_cond1) # not added condition for m1,c1\r\n add_cond2 = add_cond.any(axis=0) # add condition for m2,c2\r\n not_add_cond2 = np.logical_not(add_cond2) # not added condition for m2,c2\r\n\r\n # build resulting mi\r\n added_mi = mi1[add_cond1,:] # shall be the same as mi2[add_cond2,:]\r\n not_added_mi = np.concatenate((mi1[not_add_cond1,:], mi2[not_add_cond2,:]), axis=0) # collect the rest\r\n res_mi = np.concatenate((added_mi, not_added_mi), axis=0) # summed mi shall be the first\r\n sort_args = np.lexsort(res_mi.T,axis=-1)\r\n\r\n # build resulting coeffs\r\n added_c = c1[add_cond1] + c2[add_cond2]\r\n not_added_c = np.concatenate((c1[not_add_cond1], c2[not_add_cond2]))\r\n res_c = np.concatenate((added_c, not_added_c))\r\n\r\n return res_mi[sort_args], res_c[sort_args]" ]
[ "0.81966865", "0.8028442", "0.7911417", "0.784823", "0.7748032", "0.77048737", "0.7664608", "0.7599502", "0.75470144", "0.75353587", "0.75039405", "0.74704564", "0.74508417", "0.73524606", "0.73200405", "0.7277604", "0.724819", "0.7234582", "0.71502364", "0.7145327", "0.7096349", "0.70222175", "0.699576", "0.6949628", "0.69360805", "0.69300354", "0.69292176", "0.6912583", "0.68870616", "0.6876231", "0.6843045", "0.68196446", "0.6787352", "0.6716825", "0.66622853", "0.66593915", "0.6656594", "0.6650784", "0.6642212", "0.660058", "0.6564998", "0.6564524", "0.6564232", "0.65635955", "0.654592", "0.6510174", "0.65003896", "0.64820915", "0.6467452", "0.6435662", "0.64319074", "0.63795936", "0.63710165", "0.6304308", "0.62788284", "0.6278528", "0.62635297", "0.6259054", "0.62552184", "0.62251884", "0.6210282", "0.6202227", "0.62017906", "0.61837727", "0.61720586", "0.6169514", "0.61597407", "0.6159274", "0.6157447", "0.6146257", "0.6136078", "0.6132978", "0.61137617", "0.6112284", "0.61107326", "0.60917234", "0.60657686", "0.6059876", "0.6058759", "0.60578394", "0.605432", "0.60475814", "0.6045266", "0.6044855", "0.60404575", "0.60383683", "0.6035682", "0.603124", "0.6027057", "0.6020207", "0.6012538", "0.60067403", "0.5991112", "0.5989544", "0.59860307", "0.59822536", "0.59764427", "0.5975722", "0.5968307", "0.596416" ]
0.6929311
26
Multiply two matrices together
def __mul__(self, otherMatrix):
    if not (len(self.array[0]) == len(otherMatrix.array)):
        raise ArithmeticError
    common = len(self.array[0])
    X = len(self.array)
    Y = len(otherMatrix.array[0])
    newArray = [[0 for col in range(Y)] for row in range(X)]
    for row in range(X):
        for col in range(Y):
            for elem in range(common):
                newArray[row][col] += self.array[row][elem] * otherMatrix.array[elem][col]
    return matrix(newArray)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matmul(a, b):\n raise NotImplementedError", "def matrix_mult(m1, m2):\n pass", "def matMul(a, b):\n sa=matShape(a)\n sb=matShape(b)\n if sa[1]!=sb[0]: raise ValueError\n ret=matZeros((sa[0],sb[1]))\n for i in range(sa[0]):\n for j in range(sb[1]):\n val=0.0\n for k in range(sa[1]):\n val+=matGet(a,i,k)*matGet(b,k,j)\n matSet(ret,i,j,val)\n return ret", "def matmul(x, y):\n return np.matmul(x, y)", "def __mul__(self, other):\n if self.n != other.m:\n raise TypeError(\"Illegal dimensions for mul operator\")\n tmp = [[0 for _ in xrange(self.n)] for _ in xrange(other.m)]\n for i in xrange(self.n):\n for j in xrange(other.m):\n for k in xrange(other.n):\n tmp[i][j] += self.values[i][k] * other.values[k][j]\n res = []\n for i in tmp:\n res += i\n return simplematrix(self.n, other.m, res)", "def __matmul__(self, other):\n return F.MatMul.apply(self, other)", "def __mul__(self, other):\r\n T = type(other)\r\n # mat4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x*other, self.mlist))\r\n # mat4*vec3\r\n if isinstance(other, _vec3):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n w = float(m41*other.x + m42*other.y + m43*other.z + m44)\r\n return _vec3(m11*other.x + m12*other.y + m13*other.z + m14, \r\n m21*other.x + m22*other.y + m23*other.z + m24, \r\n m31*other.x + m32*other.y + m33*other.z + m34)/w\r\n # mat4*vec4\r\n if isinstance(other, _vec4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n return _vec4(m11*other.x + m12*other.y + m13*other.z + m14*other.w, \r\n m21*other.x + m22*other.y + m23*other.z + m24*other.w, \r\n m31*other.x + m32*other.y + m33*other.z + m34*other.w,\r\n m41*other.x + m42*other.y + m43*other.z + m44*other.w)\r\n # mat4*mat4\r\n if isinstance(other, mat4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n n11,n12,n13,n14,n21,n22,n23,n24,n31,n32,n33,n34,n41,n42,n43,n44 = other.mlist\r\n return mat4( m11*n11+m12*n21+m13*n31+m14*n41,\r\n m11*n12+m12*n22+m13*n32+m14*n42,\r\n m11*n13+m12*n23+m13*n33+m14*n43,\r\n m11*n14+m12*n24+m13*n34+m14*n44,\r\n\r\n m21*n11+m22*n21+m23*n31+m24*n41,\r\n m21*n12+m22*n22+m23*n32+m24*n42,\r\n m21*n13+m22*n23+m23*n33+m24*n43,\r\n m21*n14+m22*n24+m23*n34+m24*n44,\r\n\r\n m31*n11+m32*n21+m33*n31+m34*n41,\r\n m31*n12+m32*n22+m33*n32+m34*n42,\r\n m31*n13+m32*n23+m33*n33+m34*n43,\r\n m31*n14+m32*n24+m33*n34+m34*n44,\r\n\r\n m41*n11+m42*n21+m43*n31+m44*n41,\r\n m41*n12+m42*n22+m43*n32+m44*n42,\r\n m41*n13+m42*n23+m43*n33+m44*n43,\r\n m41*n14+m42*n24+m43*n34+m44*n44)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"", "def __mul__(self, other):\n # \n # TODO - your code here\n #\n \n result = [];\n row_result = [];\n product = 0;\n \n if(self.w != other.h):\n raise(ValueError, \"Matrices can not multiply for their dimesion doesn't match\"); \n \n for row in self.g:\n row_result = [];\n for j in range(other.w):\n product = dot_product(row,other.get_column(j));\n row_result.append(product);\n result.append(row_result);\n \n return Matrix(result);", "def matmul(x, y):\n if len(list(y.size())) == 2:\n # if one of them is a vector (i.e. 
wanting to do MV mult)\n z = torch.zeros(2, x.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.mv(x[0], y[0]) - torch.mv(x[1], y[1])\n z[1] = torch.mv(x[0], y[1]) + torch.mv(x[1], y[0])\n\n if len(list(y.size())) == 3:\n z = torch.zeros(\n 2, x.size()[1], y.size()[2], dtype=torch.double, device=x.device\n )\n z[0] = torch.matmul(x[0], y[0]) - torch.matmul(x[1], y[1])\n z[1] = torch.matmul(x[0], y[1]) + torch.matmul(x[1], y[0])\n\n return z", "def mult(m1, m2):\n assert np.shape(m1) == (2, 3)\n assert np.shape(m2) == (2, 3)\n\n m1_temp = np.vstack((m1, [0, 0, 1]))\n m2_temp = np.vstack((m2, [0, 0, 1]))\n result = m1_temp * m2_temp\n\n return result[:2, :]", "def matmul(A, B):\n\n A._check('*', B, A.shape[1], B.shape[0])\n return A.from_rep(A.rep.matmul(B.rep))", "def __mul__(self, other):\n return Matrix3(\n self.i * other,\n self.j * other,\n self.k * other,\n )", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def mul(self, a, b):\n return a * b", "def __mul__(self, other):\n if isinstance(other, (int, float)):\n newmat = make_matrix(self.rows, self.cols)\n for i in range(newmat.rows):\n for j in range(newmat.cols):\n newmat[i, j] = self[i, j] * other\n elif isinstance(other, Matrix):\n if self.cols != other.rows:\n raise IndexError(\"Row/column mismatch: (%i, %i) x (%i, %i)\"%\n (self.rows, self.cols, other.rows, other.cols))\n\n newmat = make_matrix(self.rows, other.cols)\n\n for i in range(newmat.rows):\n for j in range(newmat.cols):\n for k in range(self.cols):\n newmat[i, j] += self[i, k] * other[k, j]\n return newmat", "def __mul__(self, other):\n if hasattr(other, 'as_homogenous_transformation'):\n return basis(homogenous_transformation = self.as_homogenous_transformation() * other.as_homogenous_transformation())\n elif hasattr(other, 'n'):\n if other.n == (3,1):\n b = matrix.col((other[0], other[1], other[2], 1))\n elif other.n == (4,1):\n b = other\n else:\n raise TypeError(b, \"Incompatible matrices\")\n p = self.as_homogenous_transformation() * b\n if other.n == (3,1):\n return matrix.col(p[0:3])\n else:\n return p\n else:\n raise TypeError(b)", "def mul(self,mat1,mat2):\n if(isinstance(mat2,int)==True):\n result = [[mat1[i][j] * mat2 for j in range(len(mat1[0]))] for i in range(len(mat1))]\n self.out = result\n return self.out\n elif(len(mat1[0])==len(mat2)):\n result = [[sum(a*b for a,b in zip(i,j)) for j in zip(*mat2)] for i in mat1]\n self.out = result\n return self.out", "def lazy_matrix_mul(m_a, m_b):\n m_a = np.array(m_a)\n m_b = np.array(m_b)\n\n return m_a.dot(m_b)", "def multiply_matrices(a, b):\n try:\n x = len(b[0])\n except:\n b = make_2D(b)\n try:\n x = len(a[0])\n except:\n a = make_2D(a)\n if len(a[0]) != len(b):\n print 'error: matrices cannot be multiplied'\n return\n out = np.zeros((len(a), len(b[0])))\n for i in range(len(out)):\n for j in range(len(out[0])):\n sum = 0\n for k in range(len(a[i])):\n sum += a[i][k] * b[k][j]\n out[i][j] = sum\n return out", "def mult(self, other):\r\n A = np.dot(self.M, other.M)\r\n B = np.dot(self.M, other.N) + np.dot(self.N, other.M) + \\\r\n self.k*np.dot(self.N, other.N)\r\n return MyForm(A, B, self.k)", "def matmul(self, other):\n shape = (self.rows, other.cols)\n return self._new(self.rep * other.rep, shape, self.domain)", "def mat_mul(mat1, mat2):\n\n rows1 = len(mat1)\n cols1 = len(mat1[0])\n rows2 = len(mat2)\n cols2 = len(mat2[0])\n\n if cols1 != rows2:\n return None\n else:\n new_matrix = []\n for x in range(rows1):\n aux_row = []\n for y in range(cols2):\n aux_sum = []\n for z in 
range(cols1):\n aux_sum.append(mat1[x][z] * mat2[z][y])\n aux_row.append(sum(aux_sum))\n new_matrix.append(aux_row)\n\n return new_matrix", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def __matmul__(self, other):\n if isinstance(other, types.Vector):\n return self.apply(target=other)\n if isinstance(other, MatrixLieGroup):\n return self.multiply(other=other)\n else:\n assert False, \"Invalid argument\"", "def __mul__(self, other):\n if isinstance(other, (int, float)):\n return Matrix([[self.values[row][index] * other\n for index in range(len(self.values[0]))]\n for row in range(len(self.values))])\n\n elif isinstance(other, Vector):\n return Vector([other.dot(Vector(row)) for row in self.values])\n\n elif isinstance(other, Matrix):\n return Matrix([(other.transpose() * Vector(row)).values\n for row in self.values])", "def mul(x, y):\n return multiply(x, y)", "def __mul__(self, other):\n new_matrix = np.dot(self.affine_matrix, other.affine_matrix)\n return SymmOp(new_matrix)", "def mat_mul(mat1, mat2):\n\n if len(mat1[0]) == len(mat2):\n\n mat2 = matrix_transpose(mat2)\n response = []\n\n for row in range(len(mat1)):\n response.append(\n [\n sum(dot_product(mat1[row], mat2[column]))\n for column in range(len(mat2))\n ]\n )\n\n return response\n\n else:\n return None", "def np_matmul(mat1, mat2):\n return np.matmul(mat1, mat2)", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def __mul__(self, other):\n if isinstance(other, Vector):\n # Matrix vector product\n v = Vector(list())\n for n in range(len(other.vectors)):\n v += scale(other.vectors[n][n], self.vectors[n])\n return v\n elif isinstance(other, Matrix):\n # Matrix matrix product\n if self.n != other.m:\n raise ValueError(\"Wrong fucking sizes, nøøb\")\n\n selfVectors = self.vectors\n selfColVectors = self.transpose()\n otherVectors = other.vectors\n otherColVectors = other.transpose()\n vectors = list()\n for col in range(other.n):\n cordinator = []\n\n for row in range(self.m):\n coord = 0\n\n for k in range(other.m):\n coord += (\n selfVectors[row].coords[k]\n * otherColVectors.vectors[col].coords[k]\n )\n\n cordinator.append(coord)\n\n v = Vector(cordinator)\n vectors.append(v)\n matrix = Matrix(vectors)\n matrix = matrix.transpose()\n return matrix\n elif isinstance(other, int) or isinstance(other, float): # Skalering af matrix\n for i in range(len(self.vectors)):\n self.vectors[i] *= other\n else:\n raise ValueError(\n \"Can only multiply Matrix with Matrix, Vector, Integer or Float\"\n )", "def calculate_matmul(mat_a, mat_b):\n assert mat_a.shape[-2] == 1 and mat_b.shape[-1] == 1\n return tf.reduce_sum(tf.squeeze(mat_a, -2) * tf.squeeze(mat_b, -1), axis=2, keepdims=True)", "def __mul__(self, other):\r\n return self.prod(other)", "def lazy_matrix_mul(m_a, m_b):\n return np.matmul(np.array(m_a), np.array(m_b))", "def multiply(lhs, rhs):\n return _make.multiply(lhs, rhs)", "def __mul__(self, other):\n\n # Scalar multiplication\n if isinstance(other, (int, long, float, complex)):\n return Matrix(self.rows, self.columns, [other * x for x in self.data])\n\n if not issubclass(type(other), Matrix):\n 
raise TypeError(type(other))\n\n if self.columns != other.rows:\n raise ValueError(\"Undefined multiplication for these matrices\")\n\n result = []\n for i in range(1, self.rows + 1):\n row = self.row(i)\n result.extend([dot_product(row, other.column(j)) for j in range(1, other.columns + 1)])\n\n return Matrix(self.rows, other.columns, data = result)", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def __mul__(self, other):\n N = self.matrix.shape[1]\n\n if isinstance(other, _PysparseMatrix):\n return _PysparseMatrix(matrix=spmatrix.matrixmultiply(self.matrix, other.matrix))\n else:\n shape = numerix.shape(other)\n if shape == ():\n L = spmatrix.ll_mat(N, N, N)\n L.put(other * numerix.ones(N, 'l'))\n return _PysparseMatrix(matrix=spmatrix.matrixmultiply(self.matrix, L))\n elif shape == (N,):\n y = numerix.empty((self.matrix.shape[0],))\n self.matrix.matvec(other, y)\n return y\n else:\n raise TypeError", "def multiplies(x, y):\n x[:] *= y[:]\n return x", "def matrixMultiply(a, colsA, b, colsB):\r\n\trowsA = len(a)\r\n\trowsB = len(b)\r\n\r\n\t# rowsA x colsA ... rowsB x colsB \r\n\tassert rowsA == colsB, \"matrix dimensions not fit for multiplication\"\r\n\r\n\t# result size: rowsA x colsB\r\n\tr = rowsA * [None]\r\n\tfor i in range(rowsA):\r\n\t\tr[i] = colsB * [None]\r\n\t\tfor j in range(colsB):\r\n\t\t\t\tr[i][j] = sum( a[i][k]* b[k][j] for k in range(colsA))\r\n\treturn r", "def lazy_matrix_mul(m_a, m_b):\n return (np.matmul(m_a, m_b))", "def mul(x, y):\n # dispatch to sparse methods\n if issparse(x):\n return x.multiply(y)\n elif issparse(y):\n return y.multiply(x)\n\n return mul_dense(x, y)", "def matrix_multiply(m1, m2):\n\n\tproduct = numpy.matmul(m1, m2)\n\tif type(product) == numpy.int64:\n\t\treturn float(product)\n\telse:\n\t\tresult = list(product)\n\t\treturn result", "def _multiply(self, other):\n raise NotImplementedError(\n \"{} does not support scalar multiplication\".format(type(self)))", "def Multiply(M1,M2):\r\n M3=[]\r\n w=0\r\n while w<len(M2[0]):\r\n tap=[]\r\n t=0\r\n while t<len(M2):\r\n tap.append(M2[t][w])\r\n t=t+1\r\n M3.append(tap)\r\n w=w+1\r\n M=[]\r\n # Multiplying matrices\r\n k=0\r\n sums=0\r\n while k<len(M1):\r\n j=0\r\n mpy=[]\r\n while j<len(M3):\r\n p=0\r\n sums=0\r\n while p<len(M3[j]):\r\n temp = (M1[k][p])*(M3[j][p])\r\n sums=sums+temp\r\n p=p+1\r\n mpy.append(sums)\r\n j=j+1\r\n M.append(mpy)\r\n k=k+1\r\n return M", "def __mul__(self,m):\n if type(m) != Matrix:\n raise TypeError('The second argument is not a matrix lol')\n if self.ncols != m.nrows:\n raise ValueError('matrix dot argument has incorrect number of rows')\n new = Matrix(self.nrows,m.ncols)\n columns = m.getCols()\n rowindex = 0\n colindex = 0 \n for row in self.matrix:\n colindex = 0 \n for col in columns:\n summ = 0\n for i,j in zip(row,col):\n summ+= i*j \n new.matrix[rowindex][colindex] = summ\n print new.matrix\n colindex += 1 \n rowindex+=1\n return new", "def __matmul__(self, B):\n m, n = self.shape\n n_, r = B.shape\n assert n == n_, (\"Cannot multiply shapes \"\n \"({}, {}) and ({}, {})\".format(m, n, n_, r))\n mul_ = dict()\n # compute A_ik = sum_j A_ij*B_jk\n for i in range(m):\n for k in range(r):\n prod = mpfr(0)\n for j in range(n):\n prod += self[i, j] * B[j, k]\n mul_[i, k] = prod\n return MPMatrix((m, r), mul_)", "def _mul(a, b):\n return a * b", "def mul(Z,X,Y):", "def __mul__(self, other):\r\n\r\n T = type(other)\r\n # vec4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x*other, 
self.y*other, self.z*other, self.w*other)\r\n # vec4*vec4\r\n if isinstance(other, vec4):\r\n return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w\r\n # unsupported\r\n else:\r\n # Try to delegate the operation to the other operand\r\n if getattr(other,\"__rmul__\",None)!=None:\r\n return other.__rmul__(self)\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"", "def mul(self, other):\n return self._new_rep(self.rep * other)", "def __mul__(self, other):\n if not isinstance(other, Matrix):\n return NotImplemented\n\n if self.num_cols != other.num_rows:\n raise ValueError(\"self.num_cols (%d) != other.num_rows (%d)\" % (self.num_cols, other.num_cols))\n\n new_mat = Matrix(self.num_rows, other.num_cols)\n\n # iterate through rows of self\n for i in range(self.num_rows):\n # iterate through columns of other\n for j in range(other.num_cols):\n # iterate through rows of other\n for k in range(other.num_rows):\n new_mat[i, j] += self[i, k] * other[k, j]\n\n return new_mat", "def np_matmul(mat1, mat2):\n return mat1.dot(mat2)", "def matmul(A, B):\n # type: (Optional[Tensor], Tensor) -> Tensor\n if A is None:\n return B\n if is_sparse(A):\n return torch.sparse.mm(A, B)\n return torch.matmul(A, B)", "def __mul__(self,that):\n return self.__opExpand2(that, np.multiply)", "def matrix_multiply(A,B):\n rowsA = len(A)\n colsA = len(A[0])\n\n rowsB = len(B)\n colsB = len(B[0])\n\n if colsA != rowsB:\n raise ArithmeticError('Number of A columns must equal number of B rows.')\n\n C = zeros_matrix(rowsA, colsB)\n\n for i in range(rowsA):\n for j in range(colsB):\n total = 0\n for ii in range(colsA):\n total += A[i][ii] * B[ii][j]\n C[i][j] = total\n\n return C", "def __mul__(self, oth):\n\t\tif isinstance(oth, Matrix) or isiterable(oth):\n\t\t\t# matrix\n\t\t\toth_m = oth\n\t\t\tif not isinstance(oth_m, Matrix):\n\t\t\t\toth_m = Matrix(oth_m)\t\t\t\n\t\t\tres_m = self._mat_mul(oth_m)\n\t\t\tif isinstance(oth, Matrix):\n\t\t\t\treturn res_m\n\t\t\telse:\n\t\t\t\treturn type(oth)(res_m._unnest())\n\t\telse:\n\t\t\t# scalar\n\t\t\treturn Matrix._make_new(lambda i,j: self.data[i][j] * oth, self.rows, self.cols)", "def __mul__(self,other):\n # \n # 注意矩阵的A的列 与 相乘矩阵B的行必须相等,才能进行运算\n height = 0\n width = 0\n if isinstance(other, list): # 判断other是否是矩阵,即list形式的矩阵\n height = len(other)\n width = len(other[0])\n else:\n # 如果是对象,则直接获取行列值\n height = other.h\n width = other.w\n\n\n my_mul = zeroes(self.h, self.w)\n if self.w == height: # 两个矩阵的行列值需要相等 才能相乘\n for i in range(self.h):\n for j in range(width):\n my_sum = 0\n for k in range(height):\n if isinstance(other, list):\n my_sum += self.g[i][k] * other[k][j]\n # 通过3个循环变量取所有矩阵的行列值\n else:\n my_sum += self.g[i][k] * other.g[k][j]\n my_mul[i][j] = my_sum\n return my_mul \n else:\n return NotImplementedError", "def mul_elementwise(self, other):\n # XXX: flint matrices do not support elementwise multiplication\n return self.to_ddm().mul_elementwise(other.to_ddm()).to_dfm()", "def mul(x, y):\n return x * y", "def mul(x, y):\n return x * y", "def matrix_mul(m_a, m_b):\n if not isinstance(m_a, list):\n raise TypeError(\"m_a must be a list\")\n if not isinstance(m_b, list):\n raise TypeError(\"m_b must be a list\")\n if not all(isinstance(lst, list) for lst in m_a):\n raise TypeError(\"m_a must be a list of lists\")\n if not all(isinstance(lst, list) for lst in m_b):\n raise TypeError(\"m_b must be a list of lists\")\n if m_a in [[], [[]]]:\n raise ValueError(\"m_a can't be empty\")\n if m_b in [[], [[]]]:\n raise ValueError(\"m_b can't be 
empty\")\n if not all(all(isinstance(i, (int, float)) for i in lst) for lst in m_a):\n raise TypeError(\"m_a should contain only integers or floats\")\n if not all(all(isinstance(i, (int, float)) for i in lst) for lst in m_b):\n raise TypeError(\"m_b should contain only integers or floats\")\n if not all(len(i) == len(m_a[0]) for i in m_a):\n raise TypeError(\"each row of m_a must be of the same size\")\n if not all(len(i) == len(m_b[0]) for i in m_b):\n raise TypeError(\"each row of m_b must be of the same size\")\n if not len(m_a[0]) == len(m_b):\n raise ValueError(\"m_a and m_b can't be multiplied\")\n new_matrix = [[0 for i in m_b[0]] for j in m_a]\n for i in range(len(m_a)):\n for j in range(len(m_b[0])):\n for k in range(len(m_b)):\n new_matrix[i][j] += m_a[i][k] * m_b[k][j]\n return new_matrix", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def matrixMul(a, b):\n # Initializing Empty Matrix\n c = [[0, 0], [0, 0]]\n # 2x2 matrix multiplication. Essentially O(1)\n for i in range(2):\n for j in range(2):\n for k in range(2):\n c[i][j] = (c[i][j] + (a[i][k] * b[k][j]))\n\n # Returning the products\n return c", "def my_matmul(x, y):\n ##\n cmd = getattr(th, \"matmul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n ret = (x2y1 + x1y2) % int24field * int24field + x2y2\n ret = int48module(ret)\n return ret", "def matmul(x, y, _pub):\n if x.shape[-1] != y.shape[-2]:\n pass # TODO: REPORT ERROR\n res = paillier_gpu.matmul_impl(x.flatten(), y.flatten(order='F'), x.shape, y.shape)\n\n return res", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def multiply(self, a, b):\n return a * b", "def matrix_multiply(self, Am, Bm):\r\n # Section 1: Ensure A & B dimensions are correct for multiplication\r\n rowsA = len(Am)\r\n colsA = len(Am[0])\r\n rowsB = len(Bm)\r\n colsB = len(Bm[0])\r\n if colsA != rowsB:\r\n raise ArithmeticError(\r\n 'Number of A columns must equal number of B rows.')\r\n \r\n # Section 2: Store matrix multiplication in a new matrix\r\n C = self.zeros_matrix(rowsA, colsB)\r\n for i in range(rowsA):\r\n for j in range(colsB):\r\n total = 0\r\n for ii in range(colsA):\r\n total += Am[i][ii] * Bm[ii][j]\r\n C[i][j] = total\r\n \r\n return C", "def __mul__(self, other):\n return sum(self._ar * other._ar)", "def mul(a,b):\r\n return a*b", "def multiply(a, b):\n columns_of_a = len(a[0])\n lines_of_b = len(b)\n if columns_of_a != lines_of_b:\n # Check matrix dimensions\n print \"Incompatible sizes!\"\n else:\n lines_of_a = len(a)\n columns_of_b = len(b[0])\n #C = []\n #for i in range (lines_of_a):\n # C.append(columns_of_b * [0])\n c = [columns_of_b * [0] for i in range(lines_of_a)]\n for i in range(lines_of_a):\n for j in range(columns_of_b):\n for k in range(lines_of_b):\n c[i][j] += a[i][k] * b[k][j]\n return c", "def __mul__(left, right):\n \n if isinstance(left, Plucker) and isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n elif isinstance(left, Plucker) and arg.ismatrix(right, (4,None)):\n return left.skew @ right; # postmultiply by 4xN", "def 
matrixMul(self, matrix, matrix2):\n matrix0 = matrix[:]\n matrix[0] = matrix0[0] * matrix2[0] + matrix0[2]*matrix2[1] # + matrix0[4]*0\n matrix[1] = matrix0[1] * matrix2[0] + matrix0[3]*matrix2[1] # + matrix0[5]*0\n matrix[2] = matrix0[0] * matrix2[2] + matrix0[2]*matrix2[3] # + matrix0[4]*0\n matrix[3] = matrix0[1] * matrix2[2] + matrix0[3]*matrix2[3] # + matrix0[5]*0\n matrix[4] = matrix0[0] * matrix2[4] + matrix0[2]*matrix2[5] + matrix0[4]\n matrix[5] = matrix0[1] * matrix2[4] + matrix0[3]*matrix2[5] + matrix0[5]", "def prod_mat(self,other):\n [rs,cs],[ro,co] = self.D,other.D\n assert cs == ro, \"tailles incompatibles\"\n return Mat([rs,co], lambda i,j : prod_scal(self.ligne(i),other.col(j)))", "def matrix_mul(m_a, m_b):\n if not isinstance(m_a, list):\n raise TypeError(\"m_a must be a list\")\n if not isinstance(m_b, list):\n raise TypeError(\"m_b must be a list\")\n if len(list(filter(lambda i: not isinstance(i, list), m_a))) > 0:\n raise TypeError(\"m_a must be a list of lists\")\n if len(list(filter(lambda i: not isinstance(i, list), m_b))) > 0:\n raise TypeError(\"m_b must be a list of lists\")\n if m_a is None or m_a == [] or m_a == [[]]:\n raise ValueError(\"m_a can't be empty\")\n if m_b is None or m_b == [] or m_b == [[]]:\n raise ValueError(\"m_b can't be empty\")\n for r in m_a:\n for v in r:\n if not isinstance(v, (int, float)):\n raise ValueError(\"m_a should contain only integers or floats\")\n for r in m_b:\n for v in r:\n if not isinstance(v, (int, float)):\n raise ValueError(\"m_b should contain only integers or floats\")\n if max(map(lambda i: len(i), m_a)) != min(map(lambda i: len(i), m_a)):\n raise TypeError(\"each row of m_a must be of the same size\")\n if max(map(lambda i: len(i), m_b)) != min(map(lambda i: len(i), m_b)):\n raise TypeError(\"each row of m_b must be of the same size\")\n try:\n w = (len(m_a) + (0, 1)[len(m_a) == 1])\n m_c = [(['x'] * w) for b in range(len(m_b[0]))]\n for i in range(len(m_a)):\n for j in range(len(m_b[0])):\n s = 0\n for k in range(len(m_a[0])):\n s += (m_a[i][k] * m_b[k][j])\n m_c[i][j] = s\n return list(filter(lambda r: r != (['x'] * w), m_c))\n except:\n raise ValueError(\"m_a and m_b can't be multiplied\")", "def matmul(x1, x2, out=None, casting='same_kind',\n order='K', dtype=None, subok=True,\n signature=None, extobj=None):\n if np.isscalar(x1) or np.isscalar(x2):\n raise ValueError(\"matmul: input can not be scalar\")\n return dot(x1, x2, out)", "def multiplicacion(x, y):\n return x * y", "def multiply(x, y):\n\n return x * y", "def mult(a, b):\n return a * b", "def matrix_multiply(A, B):\n # Section 1: Ensure A & B dimensions are correct for multiplication\n rowsA = len(A); colsA = len(A[0])\n rowsB = len(B); colsB = len(B[0])\n if colsA != rowsB:\n raise ArithmeticError(\n 'Number of A columns must equal number of B rows.')\n\n # Section 2: Store matrix multiplication in a new matrix\n C = zeros_matrix(rowsA, colsB)\n for i in range(rowsA):\n for j in range(colsB):\n total = 0\n for ii in range(colsA):\n total += A[i][ii] * B[ii][j]\n C[i][j] = total\n\n return C", "def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)", "def matrix_mul(m_a, m_b):\n rows_a = 0\n cols_a = 0\n rows_b = 0\n cols_b = 0\n if type(m_a) is not list:\n raise TypeError(\"m_a must be a list\")\n if type(m_b) is not list:\n raise TypeError(\"m_b must be a list\")\n length = []\n for row in m_a:\n if type(row) is not list:\n raise TypeError(\"m_a must be a list of lists\")\n for row in m_b:\n if 
type(row) is not list:\n raise TypeError(\"m_b must be a list of lists\")\n if m_a == [] or m_a == [[]]:\n raise ValueError(\"m_a can't be empty\")\n if m_b == [] or m_b == [[]]:\n raise ValueError(\"m_b can't be empty\")\n for row in m_a:\n cols_a = 0\n for elem in row:\n if type(elem) is not int and type(elem) is not float:\n raise TypeError(\"m_a should contain only integers or floats\")\n cols_a += 1\n for row in m_b:\n cols_b = 0\n for elem in row:\n if type(elem) is not int and type(elem) is not float:\n raise TypeError(\"m_b should contain only integers or floats\")\n cols_b += 1\n for row in m_a:\n length.append(len(row))\n rows_a += 1\n if not len(set(length)) <= 1:\n raise TypeError(\"each row of m_a must be of the same size\")\n length.clear()\n for row in m_b:\n length.append(len(row))\n rows_b += 1\n if not len(set(length)) <= 1:\n raise TypeError(\"each row of m_b must be of the same size\")\n if cols_a != rows_b:\n raise ValueError(\"m_a and m_b can't be multiplied\")\n new = [[0 for i in range(cols_b)] for j in range(rows_a)]\n for new_rows in range(rows_a):\n for new_cols in range(cols_b):\n for i in range(cols_a):\n new[new_rows][new_cols] += m_a[new_rows][i] * m_b[i][new_cols]\n return new", "def matrix_mult(m1, m2):\n output = []\n for rowIndex, row in enumerate(m1): #go through rows in m1\n new_row = []\n for columnIndex in range(len(m2[0])): #go through indices for each column of m2\n sum = 0\n for index3 in range(len(row)):\n product = m1[rowIndex][index3] * m2[index3][columnIndex]\n sum += product\n new_row.append(sum)\n output.append(new_row)\n return output\n \n \n #output = []\n #first for loop corresponds to the rows of my output matrix and loops through the rows of m1 (enumerate)\n #create an empty new row\n # second for loop, loops through columns of m2\n # create sum variable, initialize it with zero\n # third for loop, multiplies the index of the row in m1 times the index of the column in m2\n # add sum to product and assign this to the sum variable\n # append sum to new row\n # append new row to output\n # return output", "def __mul__(self, other):\n return Vec2d(self.v[0] * other, self.v[1] * other)", "def matrixMult( self, matrix0, matrix1 ):\r\n result = {}\r\n keys = sorted( set( matrix0.keys() ) )\r\n count = range( len( matrix0.keys() ) )\r\n \r\n for key in keys:\r\n result[ key ] = []\r\n for i in count:\r\n sum = 0\r\n for j in count:\r\n sum += matrix0[ key ][j] * matrix1[ keys[j] ][i]\r\n result[ key ].insert( i, sum )\r\n \r\n return result", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def __mul__(self, other):\n\n return self._binary_elementwise_op(other, np.multiply)", "def multiply(self, other):\n if is_matrix(other):\n return self._multiply_matrix(other)\n elif mathutil.is_scalar(other):\n return self._multiply_scalar(other, 1)\n else:\n self._logger.error(\n \"'Matrix' intance, int, float or complex expected, not '{}'\".format(type(other)))\n raise TypeError(\n \"'Matrix' intance, int, float or complex expected, not '{}'\".format(type(other)))", "def mul(a,b):\n return [a[0]*b[0],a[1]*b[1],a[2]*b[2],1.0]", "def multiplication(a, b):\n return a * b", "def matrix_mult(M, vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "def matrix_mult(M, vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "def 
__matmul__(self, csys):\n self._transform(csys)\n return self", "def mul_dense(x, y): # pragma: no cover\n return x * y", "def fast_matmul_2d(self, other):\n if isinstance(other, np.ndarray):\n mat_tensor = PaillierTensor(other, partitions=self.partitions)\n return self.fast_matmul_2d(mat_tensor)\n\n if isinstance(other, CTableABC):\n other = PaillierTensor(other)\n\n func = self._vector_mul\n ret_mat = self._obj.join(other.get_obj(), lambda vec1, vec2: (vec1, vec2)).applyPartitions(func).reduce(\n lambda mat1, mat2: mat1 + mat2)\n\n return ret_mat", "def __mul__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other, coerce=False)\n\n if mv:\n newValue = self.layout.gmt_func(self.value, other.value)\n else:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj*other\n\n newValue = other * self.value\n\n return self._newMV(newValue)", "def transforms_multiply(t0s, t1s):\r\n \r\n return ut.matrix_multiply(t0s, t1s)", "def multiply(x, y):\n return x * y", "def multiply(x, y):\n return x * y" ]
[ "0.8291302", "0.8262729", "0.80926496", "0.78970397", "0.78095114", "0.76959616", "0.76780117", "0.76680285", "0.7643243", "0.7567869", "0.753683", "0.7533653", "0.7526934", "0.752129", "0.74732196", "0.7456403", "0.7453868", "0.7389877", "0.7388949", "0.73717046", "0.7357616", "0.73049134", "0.7300121", "0.7288647", "0.7288437", "0.72863185", "0.7283086", "0.72576666", "0.7254974", "0.72517705", "0.7250966", "0.72461575", "0.722486", "0.7223623", "0.72132456", "0.71945465", "0.7192831", "0.7186697", "0.7185638", "0.71823144", "0.7166138", "0.71526915", "0.7152135", "0.7142008", "0.71097755", "0.7106241", "0.7100691", "0.7092576", "0.7086155", "0.707982", "0.7065619", "0.70598066", "0.70511085", "0.7030263", "0.7028465", "0.70274633", "0.7027264", "0.7022426", "0.70206916", "0.70189065", "0.7012479", "0.7012479", "0.70102537", "0.6993692", "0.6993628", "0.69919896", "0.6984545", "0.6982274", "0.69814885", "0.6971744", "0.69706374", "0.6968011", "0.6966809", "0.6965889", "0.69634014", "0.69535464", "0.6941218", "0.69387645", "0.69383514", "0.6912387", "0.690194", "0.6886712", "0.6885916", "0.6874928", "0.6872904", "0.68699086", "0.6863542", "0.686164", "0.6849955", "0.6848119", "0.6845351", "0.68450934", "0.6844369", "0.6844369", "0.6843433", "0.6830382", "0.68264747", "0.6821302", "0.6808765", "0.6805022", "0.6805022" ]
0.0
-1
Load instruments from configpath
def _load(self) -> list[Instrument]:
    logger.info("Loading config...")
    self._config = yml.load(self.configpath)
    instruments, modespec = self._config["instruments"], self._config["modes"]
    logger.success(f"Found {len(instruments)} instruments, {len(modespec)} modes")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {\n \"arg\": \"val\"\n }\n })", "def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))", "def load_from_config(self, **config: Any) -> None:\n for key, filename in config.items():\n self.load(filename, key)", "def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })", "def load_config(self):\n pass", "def load_cfg(self, path):\n if os.path.exists(path):\n self.djs_core = Librarian(path)\n if self.djs_core.load_cfg():\n self.plugins = self.djs_core.debug_info()['plugins']\n tmp = [plug.split(\":\") for plug in self.plugins]\n result = {}\n for lis in tmp:\n if not lis[0] in result:\n result[lis[0]] = []\n result[lis[0]].append(lis[1])\n self.info = dict2table(result)\n print(\"Load done.\\n\")\n else:\n print(\"Configuration file path not found.\\n\")", "def load_config(path):\n with open(path, \"rt\") as reader:\n config = pyaml.yaml.load(reader, Loader=pyaml.yaml.Loader)\n if config[\"regularization\"][\"type\"] is None or config[\"regularization\"][\"type\"] == [None]:\n config[\"regularization\"][\"type\"] = []\n if \"attention\" in config[\"regularization\"][\"type\"]:\n raise NotImplementedError\n\n config[\"experiment\"] = os.path.splitext(os.path.basename(path))[0]\n config[\"ckpt_dir\"], config[\"runs_dir\"] = init_output_dirs(config[\"experiment\"])\n \n return config", "def load(self, configs, container):\n pass;", "def load_from_conf(self):\r\n raise NotImplementedError", "def preload_all_configs(self):\n for _, _, filenames in os.walk(self.configDir):\n for filename in filenames:\n if filename[-3:] == \".py\" and filename != \"__init__.py\":\n configID = filename[0:-3]\n self.load_config(configID)", "def _loadConfigFiles(self):\n for conf in self._configFiles():\n self.configManager.load(conf)", "def load_from_conf(self):\n raise NotImplementedError", "def load_config_files(cls, config_files):\n \n config = Config()\n for configfile in config_files:\n # Note: each file loaded by the config will overlay on the previously loaded files\n config.loadfile(configfile)\n return config.sim", "def load_configuration(self, path):\n with open(path) as conf_file:\n if path.name not in self.configuration:\n self.configuration[path.name] = {}\n self.configuration[path.name] = json.load(conf_file)", "def load_analysis_path():\n import json\n import os\n with open(os.path.join(os.path.dirname(__file__), \"analysis_config.json\")) as my_file:\n analysis_paths = json.load(my_file)\n return analysis_paths", "def load_configurations() :\n\n local_path = os.path.dirname(os.path.abspath(__file__))\n print(local_path)\n file_path = local_path + os.sep + 'conf.ini'\n parser = configparser.ConfigParser()\n\n if os.path.exists(file_path) :\n config = parser.read(file_path)\n else :\n parser['PATH'] = {}\n parser['PATH']['PATH_TO_DB'] = os.path.expanduser('~/inlusio_data/InlusioDB_Juni_2015.sqlite')\n parser['PATH']['PHYSIO_PATH'] = os.path.expanduser('~/inlusio_data')\n print('Creating new configuration file!!!')\n print('Please fit conf.ini to your local data path!')\n with open(file_path, 'w') as configfile:\n 
parser.write(configfile)\n\n return parser", "def load_config(config_path):\n global config\n with open(config_path) as config_file:\n config = munchify(yaml.safe_load(config_file))", "def load ( self ):\n files = config.get_or_fail ( 'REPO.config_files' )\n for f in files:\n self.load_file ( f )", "def load():\n # get (or create) config path\n p = initialize()\n return load_config(open(p['config']))", "def load_config(self, path=\"\"):\n if not path:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n file_path = QtGui.QFileDialog.getOpenFileName(self,\n \"Open Config\",\n CONFIG_DIR,\n \"Config Files (*.cfg)\")\n else:\n file_path = path\n self._load_state(file_path)\n #self.write_text(\"Loaded config @ {}\".format(file_path))", "def load(self, config_instance):\r\n pass", "def load_config():\n\t\ttry:\n\t\t\tconf = ConfigParser()\n\n\t\t\tconfig_path = get_config_path()\n\t\t\tconf.read(config_path)\n\n\t\t\t# save references to conf, and config_path in class variables\n\t\t\tConfig.config_path = config_path\n\t\t\tConfig.conf = conf\n\n\t\t\tConfig.source_dir = conf.get('paths', 'source_dir')\n\t\t\tConfig.lyrics_dir = conf.get('paths', 'lyrics_dir')\n\n\t\t\tConfig.save_to_file = conf.getboolean('actions', 'save_to_file')\n\t\t\tConfig.save_to_tag = conf.getboolean('actions', 'save_to_tag')\n\n\t\t\tConfig.overwrite = conf.getboolean('actions', 'overwrite')\n\n\t\t\t# Load all the sources\n\t\t\tConfig.lyric_wikia = conf.getboolean('sources', 'lyric_wikia')\n\t\t\tConfig.musix_match = conf.getboolean('sources', 'musix_match')\n\t\t\tConfig.lyricsmode = conf.getboolean('sources', 'lyricsmode')\n\t\t\tConfig.az_lyrics = conf.getboolean('sources', 'az_lyrics')\n\n\t\t\t# Loading this with user config, we need to call the load_config only once at start.\n\t\t\tConfig.lyric_files_in_dir = glob2.glob(os.path.join(Config.lyrics_dir, '**/*.txt'))\n\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to load config.')\n\t\t\tprint(e)", "def load_experiment(self):\n load_dir = select_dir(os.getcwd())\n if load_dir is not None:\n if os.path.isfile(os.path.join(load_dir, 'conf', 'config')):\n self.load_main(load_dir)\n else:\n msg_window('missing conf/config file, not experiment directory')\n return\n\n if self.t is None:\n self.t = Tabs(self)\n self.vbox.addWidget(self.t)\n self.t.clear_configs()\n self.t.load_conf(load_dir)\n\n self.set_experiment(True)\n else:\n msg_window('please select valid conf directory')", "def load_configurations(self):\n path = os.path.join(self.user_directory, \"config\")\n configurations = {\n \"data_connector\": DataConnectorConfiguration,\n \"formats\": FormatsConfiguration,\n \"server\": ServerConfiguration,\n }\n\n for filename, configuration in configurations.items():\n config_path = os.path.join(path, filename + \".yml\")\n configuration = configuration.read_YAML(config_path)\n self.configurations[filename] = configuration", "def _load (cls, *files):\n config = ConfigParser.ConfigParser()\n config.read(files)\n \n metadata = {}\n if config.has_section(\"metadata\"):\n for key in config.options(\"metadata\"):\n metadata[key] = config.get(\"metadata\", key)\n\n processes = {}\n datasources = {}\n for section in config.sections():\n if section == \"metadata\": continue\n if section.startswith(\"process_\"):\n try:\n processes[section[8:]] = FeatureServer.Processing.loadFromSection(config, section)\n except Exception, E:\n pass \n else: \n datasources[section] = cls.loadFromSection(\n config, section, 'DataSource')\n\n 
return cls(datasources, metadata, processes)", "def load_data_from_config(self):\n\n config_file_name = \"cicada/config/config.yaml\"\n config_dict = None\n self.labels = []\n self.to_add_labels = []\n if os.path.isfile(config_file_name):\n with open(config_file_name, 'r') as stream:\n config_dict = yaml.safe_load(stream)\n print(f\"config_dict {config_dict}\")\n if (config_dict is not None) and config_dict.get(\"dir_name\"):\n self.load_data_from_dir(dir_name=config_dict[\"dir_name\"], method='clear')", "def testLoadConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n self.assertEqual(\n os.path.basename(loader.taskHolders()[0].var('contextConfig')),\n 'config.hjson'\n )", "def load(path: str, config_cls):\n\n return cfg.load(path, config_cls)", "def _load_config(self):\n\n for p in self._paths:\n if p.exists():\n with p.open() as f:\n c = yaml.safe_load(f)\n if c:\n c['_config_file'] = str(p)\n return c\n else:\n raise ConfigurationError(f\"Didn't find a config file in paths: {self._paths}\")\n\n return {}", "def load_from_config(self, **config: Any) -> None:\n for key, policy_spec in config.items():\n modelfile, templatefile = policy_spec\n self.load(modelfile, templatefile, key)", "def load(path):\n pass", "def load_conf(self):\n\n self.load_file(self.ini_file)\n self.files = []\n conf_file = open(self.ini_file, \"r\")\n for l in conf_file:\n self.files.append(l.strip())\n conf_file.close()", "def load_from_file(config_path):\n return load_json_file(config_path)", "def load(path: str) -> Any:\n config = load_configs(path)\n config.reduce(config.MUTATIONS)\n config.reduce('_reduce')\n for reduces in config.output.get('_reduce') or []:\n for item in reduces or [None]:\n config.reduce(item)\n\n output = config.output\n for post_process in output.get('_post_process') or []:\n file_info = find(post_process)\n file_info.search(file_info.module)(output)\n return output", "def read_config(self, config_filename):", "def parseConfigFile(self, config_file_path):\n parser = configparser.SafeConfigParser()\n parser.read(config_file_path)\n self.seuil_snr = int(parser.get('seuils', 'snr'))\n self.seuil_elev_sat = int(parser.get('seuils', 'sat_elevation'))\n\n # nav data path\n self.nav_data_file = parser.get('data', 'nav')\n\n print(self.nav_data_file)\n\n # obs data paths\n self.obs_data_file = parser.get('data', 'obs').split(\",\")\n\n print(self.obs_data_file)", "def load(self, path):\n pass", "def load(self, path):\n pass", "def load(self):\n super().load()\n for channel in range(self.n_channels):\n c_str = 'Channel_{0:02d}'.format(channel)\n if c_str not in self:\n log.info(f'{c_str} not found in config yaml, adding it now with defaults')\n self.set(c_str, {'amplitude': 1.5, 'dc_offset': 0.0}, save_config=True)\n\n val = self.get(c_str)\n self.amplitude(channel, val['amplitude'])\n self.offset(channel, val['dc_offset'])\n self._set_register(0, self.get('clock_delay', 1000)//100 + self._seq_length//100 - 1)", "def load_epicsLive(self, instrument=None, **kwargs):\n# if 'instrument' in kwargs:\n# instrument = kwargs.get('instrument')\n# self.instrument = instrument\n if not instrument:\n instrument = self.instrument\n\n self.ioc = psioc.IOC(instrument=self.instrument)\n if self.instrument in ['cxi', 'mfx', 'xcs', 'mec']:\n self.ioc.load_cfg('xrt')\n \n print 'Loading EpicsSets for', instrument\n self.epicsLive = lcls_devices.EpicsSets(instrument=instrument, **kwargs)\n self.update_epicsLive()", "def 
config(self) -> InstrumentConfig:\n ...", "def load_config(path_: str) -> Any:\n path = find_system(path_).path\n if path is None:\n raise ValueError(\"Can't find path {path_!r}\".format(path_=path_))\n loader: Callable[[Any], Any]\n if path.endswith('.yaml'):\n loader = yaml.safe_load\n elif path.endswith('.json'):\n loader = json.load\n else:\n raise ValueError('No known loader for {0}'.format(path))\n with open(path) as file_object:\n return loader(file_object)", "def __load_config_files(self):\n # Parsed configuration files\n # ==========================\n T_dict = ParsedParameterFile(self.config_path('0/T'))\n fv_solutions = ParsedParameterFile(self.config_path('system/fvSolution'))\n fv_schemes = ParsedParameterFile(self.config_path('system/fvSchemes'))\n control_dict = ParsedParameterFile(self.config_path('system/controlDict'))\n transport_props = ParsedParameterFile(self.config_path('constant/transportProperties'))\n\n # Registered files\n # ================\n self.foam_file('0/T', T_dict)\n self.foam_file('system/controlDict', control_dict)\n self.foam_file('system/fvSolution', fv_solutions)\n self.foam_file('system/fvSchemes', fv_schemes)\n self.foam_file('system/controlDict', control_dict)\n self.foam_file('constant/transportProperties', transport_props)", "def load(self, path: str):\n pass", "def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")", "def load_config(path):\n # opens config file\n try:\n config = configparser.ConfigParser()\n config.read(path)\n return config\n except Exception as e:\n print(\"Error loading config file: \", e)\n sys.exit(1)", "def loadConfig():\n global abs_path, app_list, app_api_subs\n\n # load application details\n with open(abs_path + '/../../../../config/apim.yaml', 'r') as file:\n apim_config = yaml.load(file, Loader=yaml.FullLoader)\n apps = apim_config['apps']\n\n for app in apps:\n app_list[app['name']] = []\n app_api_subs[app['name']] = app['api_subscriptions'].split(',')", "def loadseasoning(self):\n stream = open(self.fileref)\n self.config = yaml.safe_load(stream)\n stream.close()", "def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)", "def loadconfig():\n CONFIG['static_folder'] = str(Path(Path(APP.root_path).parent, 'static'))\n\n for cfile in Path(APP.instance_path).iterdir():\n if cfile.name[-5:] == '.json' and cfile.name != 'config.json':\n name = cfile.name[:-5]\n LOG.debug(\"Loading \" + name)\n with cfile.open() as json_data_file:\n CONFIG[name] = json.load(json_data_file)", "def load_configuration(self, path=None):\n # Parse the default configuration file\n default_path = os.path.join(os.path.dirname(__file__), \"configs\")\n\n if path is None or path == 'default':\n path = os.path.join(default_path, \"default\", \"default.yaml\")\n elif path == 'cubeviz':\n path = os.path.join(default_path, \"cubeviz\", \"cubeviz.yaml\")\n elif path == 'specviz':\n path = os.path.join(default_path, \"specviz\", \"specviz.yaml\")\n elif path == 'mosviz':\n path = os.path.join(default_path, \"mosviz\", \"mosviz.yaml\")\n elif not os.path.isfile(path):\n raise ValueError(\"Configuration must be path to a .yaml file.\")\n\n with open(path, 'r') as f:\n config = yaml.safe_load(f)\n\n self.state.settings.update(config.get('settings'))\n\n def compose_viewer_area(viewer_area_items):\n stack_items = []\n\n for item in viewer_area_items:\n stack_item = self._create_stack_item(\n 
container=CONTAINER_TYPES[item.get('container')])\n\n stack_items.append(stack_item)\n\n for view in item.get('viewers', []):\n viewer = self._application_handler.new_data_viewer(\n viewer_registry.members.get(view['plot'])['cls'],\n data=None, show=False)\n\n viewer_item = self._create_viewer_item(\n name=view.get('name'),\n viewer=viewer,\n reference=view.get('reference'))\n\n self._viewer_store[viewer_item['id']] = viewer\n\n stack_item.get('viewers').append(viewer_item)\n\n if len(item.get('children', [])) > 0:\n child_stack_items = compose_viewer_area(\n item.get('children'))\n stack_item['children'] = child_stack_items\n\n return stack_items\n\n if config.get('viewer_area') is not None:\n stack_items = compose_viewer_area(config.get('viewer_area'))\n self.state.stack_items.extend(stack_items)\n\n # Add the toolbar item filter to the toolbar component\n for name in config.get('toolbar', []):\n tool = tool_registry.members.get(name)(app=self)\n\n self.state.tool_items.append({\n 'name': name,\n 'widget': \"IPY_MODEL_\" + tool.model_id\n })\n\n for name in config.get('tray', []):\n tray = tray_registry.members.get(name)\n tray_item_instance = tray.get('cls')(app=self)\n tray_item_label = tray.get('label')\n\n self.state.tray_items.append({\n 'name': name,\n 'label': tray_item_label,\n 'widget': \"IPY_MODEL_\" + tray_item_instance.model_id\n })\n\n config_loaded_message = ConfigurationLoadedMessage(\n config['settings'].get('configuration', 'default'), sender=self)\n self.hub.broadcast(config_loaded_message)", "def load(file):\n _config.load(file)", "def load_config():\n config = ConfigParser()\n config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))\n return config", "def _load_config(path) -> dict:\n with open(path, \"r\") as F:\n return json.load(F)", "def load_from_file(self, file_path):\n\n with open(file_path) as f:\n config_text = f.read()\n self.load_from_string(config_text)", "def load_extensions(self, config):\n loaded_extensions = []\n for extension in self.extensions:\n load_func = getattr(extension, \"load\")\n loaded_extension = load_func(config)\n if loaded_extension:\n loaded_extensions.append(loaded_extension)\n return loaded_extensions", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(join(get_current_path(), 'ib.config'))\n\treturn cfg", "def load_config(self):\n if not os.path.exists(self.config_file):\n raise MissingConfigFileError(self.config_file)\n \n config = ConfigParser()\n config.read(self.config_file)\n try:\n self.loop_songs = config.getboolean(\"config\", \"loop_songs\")\n self.randomise = config.getboolean(\"config\", \"randomise\")\n self.index_dir = config.get(\"config\", \"index_dir\")\n self.music_client = config.get(\"config\", \"music_client\")\n # music_dirs may be separated by commas. Unfortunately the\n # create_config_file() does not take account of this at the\n # moment. TODO: implement this\n self.music_dirs = config.get(\"config\", \"music_dirs\").split(\",\") \n except NoOptionError:\n sys.stderr.write(\"No such option in config file\\n\")\n sys.exit(1)\n except NoSectionError:\n sys.stderr.write(\"No such section in config file\\n\")\n sys.exit(1)\n except MissingSectionHeaderError:\n sys.stderr.write(\"Failed to parse config file\\n\")\n sys.exit(1)\n \n # Verify that our music dirs are actually dirs\n for i, path in enumerate(self.music_dirs):\n try:\n check_is_dir(path)\n except DirectoryNotFoundError:\n # If an invalid directory was listed we want to remove it from \n # the list and carry on. 
It is likely that the user provided a \n # path containing a folder with a comma in the filename, so \n # lets warn of that\n self.music_dirs.pop(i)\n sys.stdout.write(\"WARNING: The '%s' directory is invalid. Please review \"\n \"your config file. Please note that directories must \"\n \"not contain commas as these are used as a \" \n \"delimiter\\n\" % path)\n \n if not os.path.isdir(self.index_dir):\n sys.stdout.write(\"Creating indicies path %s\\n\" % self.index_dir)\n os.mkdir(self.index_dir)\n self._update_index()", "def test_gather_configs_calls_load_config_from_file(self, mocked_load):\n file_path = '/file/path'\n mock_config_entry_metas = Mock()\n FileConfigSource(file_path).gather_configs(mock_config_entry_metas)\n\n mocked_load.assert_called_with(file_path)", "def load(path, files=None):\n if path is None:\n raise ValueError(\"Configuration load error. The 'path' parameter cannot be None.\")\n if not os.path.isdir(path):\n raise ValueError(\"Configuration load error. The 'path' parameter (%s) must point to an existing directory.\" % path)\n\n if files is not None:\n config_files = [os.path.join(path, f) for f in files]\n else:\n config_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.json')]\n config = {} # Configuration with params/vars/extensions\n param_info = {} # Information on params such as type and help messages\n for config_file in config_files:\n if not os.path.isfile(config_file):\n raise ValueError(\"Configuration load error. Configuration data cannot be loaded for not a file (%s)\" % config_file)\n with open(config_file) as file_obj:\n try:\n # A part of global configuration from this particular file\n config_section = json.load(file_obj)\n # Update parameters info.\n ConfigurationLoader.update_param_info(param_info, config_section, is_user_config=False)\n # Joing configuration from this single file.\n ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section))\n except ValueError:\n logging.error(\"Configuration load error. Invalid JSON configuration in file %s\", config_file)\n raise\n return (config_files, config, param_info)", "def load_from_file(self, path):\n schema = self.schema\n \n # Set up the default values.\n if schema is not None:\n for sect, sect_obj in schema.items():\n for opt, val in sect_obj.items():\n # This call is to convert the value to\n # the type specified. We do this to\n # prevent the programmer from specifying\n # inconsistent type with the value in the \n # schema.\n self.set(*_convert(schema, sect, opt, val[1]))\n\n # Parse the INI file.\n parser = RawConfigParser()\n parser.read(path)\n \n sections = parser.sections()\n for section in sections:\n \n # If application has supplied a schema,\n # and it does not has such a section, we skip\n # it. No error raised.\n if schema is not None and \\\n not schema.has_key(section):\n continue\n\n options = parser.options(section)\n \n for option in options:\n \n # If application has supplied a schema,\n # we know the section is valid since it pass the\n # previus test, but if the option is not included\n # in the section, we skip it. 
No error raised.\n if schema is not None and \\\n (option not in schema[section]):\n continue \n \n # If there is a schema, then we convert the \n # option to its type stated in the schema,\n # otherwise we just leave it as string.\n if schema is not None:\n self.set(*_convert(schema, section, option,\n parser.get(section, option)))\n else:\n self.set(section, option,\n parser.get(section, option))", "def load_config(config_path):\n with open(config_path, \"r\") as f:\n conf = yaml.load(f)\n\n params = conf['param']\n return params", "def parse_config():\n config_path = Path(\"config.ini\")\n if config_path.exists():\n config.read(config_path)\n else:\n config[\"database\"] = {\"location\": \"image-database.db\"}\n config[\"images\"] = {\"extensions\": \".jpeg,.jpg,.png,.gif,.tiff\"}\n with open(config_path, \"w\") as configfile:\n config.write(configfile)\n config.read(config_path)", "def load_config():\n config = configparser.ConfigParser()\n config.read('config.ini')\n return config", "def load(paths):\n if not paths:\n raise ConfigException(message='No configuration file specified',\n reason=paths)\n yaml_dict = {}\n if type(paths) == str:\n paths = [paths]\n # for every filename in list...\n for path in paths:\n # read config file...\n with open(path) as f:\n # get config as dict...\n y = yaml.safe_load(f)\n # and merge into a single yaml dict.\n yaml_dict.update(y)\n config = Config()\n # get object for each key and set on the config object\n __construct(config, yaml_dict)\n\n return config", "def loadconfig(self, ref):\n mod, var = ref.rsplit(\".\", 1)\n mod = importlib.reload(importlib.import_module(mod))\n jobs = getattr(mod, var)\n return jobs", "def load_config(config_file_path):\n global config\n try:\n config_file_path = os.path.abspath(config_file_path)\n assert config_file_path\n with open(file=config_file_path) as yaml_data:\n loaded_config = yaml.safe_load(yaml_data)\n for k in config:\n if k in loaded_config:\n config[k] = loaded_config[k]\n except AssertionError:\n print(f\"Config file {config_file_path} not found or unreadable ! 
Exiting..\")\n quit(1)", "def load_config(self, filename):\n # read entire file for metadata\n fh = open(filename, 'r')\n self.file_contents = fh.read()\n\n # replace !include directives with content\n config_dir = os.path.split(filename)[0]\n include_re = re.compile('^!include\\s+(.*)$', re.MULTILINE)\n def include_repl(matchobj):\n fname = os.path.join(config_dir, matchobj.group(1))\n with open(fname) as f:\n return f.read()\n while re.search(include_re, self.file_contents): # for recursive !include\n self.file_contents = re.sub(include_re, include_repl, self.file_contents)\n\n # read in dictionary\n self.config = self.__ordered_load(self.file_contents)\n\n # convert functions of other params to true expressions\n for k in self.config.keys():\n self.config[k] = ExperimentConfig.__convert_key(self.config[k])\n\n # load core configuration\n return self.config", "def load_config():\n here = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(here, 'config.json')\n with open(config_path, encoding='utf-8') as f:\n return json.load(f)", "def init_configs(self):\n\n # get current location\n self.script_dir = os.path.dirname(__file__)\n\n # load configuration file\n with open(os.path.join(self.script_dir, \"config.json\")) as f:\n self.configs = json.load(f)\n \n # load some configs as attributes\n self.resource_folder = os.path.join(self.script_dir, self.configs[\"resource_path\"], self.resource_type, self.language)\n self.pre_processed_folder = os.path.join(self.resource_folder, self.configs[\"pre_processed_path\"])\n self.results_folder = os.path.join(self.resource_folder, self.configs[\"results_path\"])\n self.chunk_size = self.configs[\"resources\"][self.resource_type][\"chunk_size\"]", "def init_config(cls, path):\n try:\n config_string = open(path).read()\n except EnvironmentError as ex:\n LOGGER.error('Could not load %s file, error: %s', path, ex)\n sys.exit()\n\n try:\n cls.config = json.loads(config_string)\n except ValueError as ex:\n LOGGER.error(' %s file is not valid json, error: %s', path, ex)\n sys.exit()", "def load_specs(self, filename):\n self.filename = filename\n # Add loading functionality here", "def load_specs(self, filename):\n self.filename = filename\n # Add loading functionality here", "def samples_path():\n dir = os.path.dirname(os.path.abspath(__file__))\n samples = os.path.join(dir, 'samples')\n return samples", "def load_config(path):\n with open(path, 'r') as stream:\n return yaml.load(stream)", "def load_configfile(cls, configfile):\n def check_path(directory):\n if not os.path.isdir(directory):\n raise NotADirectoryError\n else:\n return directory\n\n def make_dir(dir):\n if not os.path.isdir(dir):\n os.makedirs(dir)\n return dir\n\n if configfile:\n try:\n configfile = yaml.load(open(configfile).read())\n except TypeError:\n configfile = yaml.load(configfile.read())\n\n cls.__instance.samples = Cio.SampleList.from_configfile(configfile)\n\n if \"parallel containers\" in configfile:\n cls.__instance.threads = int(configfile[\"parallel containers\"])\n\n if \"threads per container\" in configfile:\n cls.__instance.container_threads = configfile[\"threads per container\"]\n\n if \"workdir\" in configfile:\n cls.__instance.work_dir = check_path(configfile[\"workdir\"])\n cls.__instance.cache_dir = make_dir(os.path.join(cls.__instance.work_dir, \".cacheIO\")) # Path for saving pickle objects\n else:\n cls.__instance.work_dir = os.getcwd()\n cls.__instance.cache_dir = make_dir(os.path.join(cls.__instance.work_dir, \".cacheIO\"))\n\n if 
\"tempdir\" in configfile:\n cls.__instance.tmp_dir = check_path(configfile[\"tempdir\"])\n else:\n cls.__instance.tmp_dir = cls.__instance.work_dir\n\n if \"log\" in configfile:\n log_file = os.path.join(cls.__instance.work_dir, configfile[\"log\"])\n cls.__instance.logger = Logger.setup_logger(\"logger\", log_file)\n\n return cls.__instance.samples", "def loadConfig(configFile):\n print \"Loading \" + configFile\n config.read(configFile)\n\n syslog = \"NO\"\n if config.has_option(\"Logging\", \"Syslog\"):\n syslog = config.get(\"Logging\", \"Syslog\")\n\n level = \"INFO\"\n if config.has_option(\"Logging\", \"Level\"):\n level = config.get(\"Logging\", \"Level\")\n\n if syslog == \"YES\":\n configLogger(\"\", -1, -1, \"YES\", level)\n else:\n configLogger(config.get(\"Logging\", \"File\"), \n config.getint(\"Logging\", \"MaxSize\"), \n config.getint(\"Logging\", \"NumFiles\"),\n \"NO\",\n level)\n\n # create connections first\n logger.info(\"Creating connections...\")\n for section in config.sections():\n if section.startswith(\"Connection\"):\n createConnection(config, section)\n\n logger.info(\"Populating the sensor/actuator list...\")\n for section in config.sections():\n if section.startswith(\"Sensor\"):\n sensors[section] = createDevice(config, section)\n elif section.startswith(\"Actuator\"):\n logger.debug(\"Adding actuator \" + section)\n actuators[section] = createDevice(config, section)\n\n return sensors", "def loadDrivers(self):\n\n self.sources = {}\n for source in self.config['sources']:\n sourceConf = self.config['sources'][source]\n baseClass = sourceConf['baseClass']\n self.logger.debug(\"Loading: \" + source +\n \" instance of: \" + baseClass)\n sourceArgs = sourceConf['source-config']\n self.sources[source] = {}\n try:\n print(baseClass)\n tempModule = import_module('sources.' + baseClass)\n \"\"\"tempModule = __import__('sources.' 
+ baseClass,\n globals(), locals(), [baseClass], -1)\n \"\"\"\n self.sources[source]['source'] = getattr(tempModule, str(\n baseClass))(sourceArgs)\n except Exception as e:\n self.logger.error(\"exception: \" + str(e))\n return None", "def LoadConfig(path):\n config = None\n with open(path) as f:\n config = json.load(f)\n\n presets = {}\n for name in config['presets']:\n presets[name] = lightserver.Preset(**config['presets'][name])\n\n return config['bulbs'], config['groups'], presets", "def load_config():\n global config\n\n with open(\"config.json\") as f:\n json_config = f.read()\n f.close()\n config = json.loads(json_config)", "def loadConfig(fileName=None):\n if not fileName:\n fileName = Config.userDir + \"config.py\"\n try:\n config = literal_eval( (open(fileName).read()) )\n except Exception,e:\n print(e)\n return\n for c in Config.userConfig:\n if c in config:\n setattr(Config, c, config[c])\n Config.update()", "def load(path, reset=False):\n pass", "def parse_config(path):\n with open(path, \"r\") as config_file:\n return toml.load(config_file)", "def update_from_file(self):\n config_path = os.environ.get('MINDINSIGHT_CONFIG', '')\n if not config_path:\n return\n\n config_module = None\n\n # python:full.path.for.config.module\n if config_path.startswith('python:'):\n config_module = import_module(config_path[len('python:'):])\n\n # file:full/path/for/config.py\n elif config_path.startswith('file:'):\n config_path = config_path[len('file:'):]\n module_name = '__mindinsightconfig__'\n config_module = types.ModuleType(module_name)\n machinery = import_module('importlib.machinery')\n loader = machinery.SourceFileLoader(module_name, config_path)\n loader.exec_module(config_module)\n\n if config_module is None:\n return\n\n for setting in dir(config_module):\n if setting.isupper() and setting in self._default_settings:\n setting_value = getattr(config_module, setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)", "def _load_config_file(self, config_type):\n cloudwatch_config = self.provider_config[\"cloudwatch\"]\n json_config_file_section = cloudwatch_config.get(config_type, {})\n json_config_file_path = json_config_file_section.get(\"config\", {})\n json_config_path = os.path.abspath(json_config_file_path)\n with open(json_config_path) as f:\n data = json.load(f)\n return data", "def load_config(args, path=\".\"):\n with open(path + \"/config/\" + args.config, 'r') as f:\n config = yaml.safe_load(f)\n\n for key, value in config.items():\n args.__dict__[key] = value", "def load_data(self, dirname, conf_file=None, loader_cls=PMCTRACKLoader):\n self.sources.append(str(dirname))\n\n # Load configuration\n if conf_file is None:\n try:\n conf_file = list(dirname.glob(\"*.conf\"))[0]\n self.conf = TrackSettings(conf_file)\n except (IndexError, AttributeError):\n msg = (\n \"Track settings file (.conf) in the `dirname` directory\"\n \"is missing or could not be read\"\n )\n warnings.warn(msg, MissingConfWarning)\n\n # Load the tracks\n loader_obj = loader_cls(dirname=dirname)\n self.data = loader_obj()\n self.columns = self.data.columns", "def loadObservables(config):\n import imp\n import inspect\n import os\n\n CLI = config.getFolder(\"CLI+\")\n robust = CLI.getTagBoolDefault(\"robust\",False)\n dummy = CLI.getTagBoolDefault(\"dummy\",False)\n\n # set global property\n if config.getTagBoolDefault(\"evaluateSubObservablesLazily\", False):\n QFramework.TQMultiObservable.gEvaluateSubObservablesLazily = True\n\n customobservables = False\n # for each 
snippet,\n for observablescript_TString in config.getTagVString(\"customObservables.snippets\"):\n QFramework.TQStringUtils.removeLeadingBlanks(observablescript_TString)\n QFramework.TQStringUtils.removeTrailingBlanks(observablescript_TString)\n QFramework.TQStringUtils.removeTrailingText(observablescript_TString, \".py\")\n observablescript = observablescript_TString.Data()\n if len(observablescript) == 0:\n QFramework.INFO(\"Skipping custom observable loading - no snippets provided\")\n continue\n found_modules = []\n observablesDirs = config.getTagVStandardString(\"customObservables.directories\")\n # search through the directories provided in the config\n for observablesPath in observablesDirs:\n module = QFramework.TQFolder.concatPaths(observablesPath, observablescript)+\".py\"\n module = common.findConfigPath(module, False, True)\n # findConfigPath returns \"\" if no module was found\n if len(module) > 0:\n # snippet was found in this directory -\n # add it's absolute path and the directory it was found in\n # to a list in tuple form\n found_modules.append((module,observablesPath))\n if len(found_modules) == 0:\n # check CommonAnalysisHelpers for an observable snippet as fall-back\n CAHObservablesDir = \"CommonAnalysisHelpers/share/observables\"\n observablesDirs.push_back(CAHObservablesDir)\n module = QFramework.TQFolder.concatPaths(CAHObservablesDir, observablescript) + \".py\"\n module = QFramework.TQPathManager.findFileFromEnvVarWithoutExecDir(module, \"CAFCOREDIR\", False)\n if len(module) > 0:\n found_modules.append((module, CAHAlgorithmsDir))\n\n # continue only if there was one match found\n if len(found_modules) == 0:\n QFramework.BREAK(\"No module found for '{:s}' in the custom observable directories provided:\\n{:s}\\n\".format(observablescript,', '.join(observablesDirs))+\n \"Please make sure that there exists a snippet by the name of '{:s}.py' available in one of them.\\n\".format(observablescript))\n elif len(found_modules) > 1:\n QFramework.BREAK(\"Ambiguity detected while resolving custom observable snippet location. 
Multiple modules found for {:s} in the custom observable directories provided:\\n{:s}\\n\".format(observablescript,', '.join(observablesDirs))+\n \"Consider placing the {:s}.py snippet only in a common directory if it's used by more than one (sub)analysis.\".format(observablescript))\n abs_path = found_modules[0][0]\n module_name = os.path.basename(abs_path).rstrip(\".py\")\n relative_path = QFramework.TQFolder.concatPaths(found_modules[0][1], observablescript)+\".py\"\n QFramework.START(\"l.\",\"loading custom observable instances from the '{:s}' snippet\".format(str(relative_path)))\n try:\n addobservables = imp.load_source(module_name, abs_path)\n\n argspec = inspect.getargspec(addobservables.addObservables)\n if 'config' in argspec.args:\n added = addobservables.addObservables(config=config)\n elif len(argspec.args) == 1:\n added = addobservables.addObservables(config)\n elif len(argspec.args) == 0:\n added = addobservables.addObservables()\n else:\n QFramework.BREAK(\"unable to add observable(s) from script '{:s}' - unknown arguments appeared: {:s}\".format(abs_path, str(argspec.args)))\n if added:\n QFramework.END(QFramework.TQMessageStream.OK)\n customobservables = True\n else:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n QFramework.BREAK(\"unable to properly setup custom observables from '{:s}'\".format(abs_path))\n except IOError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n QFramework.BREAK(\"unable to open file '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except NameError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n if not robust and not dummy:\n QFramework.BREAK(\"syntax error in observable snippet '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except AttributeError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n if not robust and not dummy:\n QFramework.BREAK(\"attribute error in observable snippet '{:s}' - please double-check!\\n\".format(abs_path)+\n \"If the message from python below is\\n'module' object has no attribute 'addObservables'\\nplease make sure that the snippet has the function addObservables() defined.\\n\"\n \"Message from python:\\n\"+str(error))\n return customobservables", "def load_fixtures(path, **kwargs):\n from .library.files import FixtureFile\n\n if not os.path.exists(path):\n log.error(\"Path does not exist: %s\" % path)\n return None\n\n ini = ConfigParser()\n ini.read(path)\n\n fixtures = list()\n group = None\n for section in ini.sections():\n _kwargs = kwargs.copy()\n\n _section = section\n if \":\" in section:\n _section, group = section.split(\":\")\n\n if \".\" in _section:\n app_label, model_name = _section.split(\".\")\n else:\n app_label = _section\n model_name = None\n\n _kwargs['group'] = group\n _kwargs['model'] = model_name\n\n for key, value in ini.items(section):\n if key == \"db\":\n key = \"database\"\n elif key == \"nfk\":\n key = \"natural_foreign\"\n elif key == \"npk\":\n key = \"natural_primary\"\n else:\n pass\n\n _kwargs[key] = smart_cast(value)\n\n fixtures.append(FixtureFile(app_label, **_kwargs))\n\n return fixtures", "def __read_instrumentslist(self):\n available_instruments = []\n with open(\"instrumentslist.txt\", \"r\") as file:\n for line in file:\n splited = line.split(\" \")\n name = \"\"\n for j in splited[1:]:\n name += j\n available_instruments.append(name)\n return available_instruments", "def _load_file(self):\n try:\n with open(self.path) as f:\n conf_lines = 
f.readlines()\n except Exception:\n sys.stderr.write(\"open('%s') failed: %s\\n\" %\n (self.path, sys.exc_info()[1]))\n raise\n\n for lineno, line in enumerate(conf_lines):\n entry = self._handler(line, lineno, self.syspaths)\n self._lines.append(entry)\n self._linemap[lineno] = entry", "def loadConf(self):\n\n with open(self.configFile) as f:\n self.config = json.load(f)", "def register_instrument(instrument):\n config.INSTRUMENTS.append(instrument.id)\n for m in instrument.modules:\n register_module(m)", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(os.path.join(get_current_directory(), 'citi.config'))\n\treturn cfg", "def create_probes(base_path: Path, config: SectionProxy) -> List[Probe]:\n probes = []\n probes_path = resolve_path(base_path, config['probes_file'])\n probes_config = ConfigParser()\n found_files = probes_config.read(probes_path)\n if len(found_files) == 0:\n raise ConfigFileNotFound(\n f'Probes file not found: {str(probes_path)!r}',\n )\n for section_name in probes_config.sections():\n category, dot, name = section_name.partition('.')\n if category == 'probe':\n section = probes_config[section_name]\n probe = Probe(\n url=section['url'],\n interval_secs=section.getfloat('interval_secs'),\n expected_pattern=section.get('expected_pattern'),\n )\n probes.append(probe)\n return probes", "def _load_config():\n config_files = ('lambda', 'global')\n config = {}\n for config_file in config_files:\n config_file_path = 'conf/{}.json'.format(config_file)\n\n if not os.path.exists(config_file_path):\n raise ConfigError('The \\'{}\\' config file was not found'.format(\n config_file_path))\n\n with open(config_file_path) as config_fh:\n try:\n config[config_file] = json.load(config_fh)\n except ValueError:\n raise ConfigError('The \\'{}\\' config file is not valid JSON'.format(\n config_file))\n\n return config", "def load_resource_config(config_path):\n return load_json_file(config_path)", "def load_qiime_config():\r\n\r\n qiime_config_filepaths = []\r\n qiime_project_dir = get_qiime_project_dir()\r\n qiime_config_filepaths.append(\r\n qiime_project_dir + '/qiime/support_files/qiime_config')\r\n\r\n qiime_config_env_filepath = getenv('QIIME_CONFIG_FP')\r\n if qiime_config_env_filepath:\r\n qiime_config_filepaths.append(qiime_config_env_filepath)\r\n\r\n home_dir = getenv('HOME')\r\n if home_dir:\r\n qiime_config_home_filepath = home_dir + '/.qiime_config'\r\n qiime_config_filepaths.append(qiime_config_home_filepath)\r\n\r\n qiime_config_files = []\r\n for qiime_config_filepath in qiime_config_filepaths:\r\n if exists(qiime_config_filepath):\r\n qiime_config_files.append(open(qiime_config_filepath))\r\n\r\n return parse_qiime_config_files(qiime_config_files)", "def load_config_file(config_path: Path) -> Dict[str, Any]:\n code = compile(\n config_path.read_text(),\n config_path.name,\n \"exec\")\n locals: Dict[str, Any] = {}\n exec(code, {\"__builtins__\": __builtins__}, locals)\n return locals", "def _load_config(config_path: str) -> dict:\n path = Path(config_path).expanduser()\n if not path.exists():\n # Produce a nice error message like:\n # Config file NAPARI_ASYNC=missing-file.json not found\n raise FileNotFoundError(\n errno.ENOENT, f\"Config file NAPARI_ASYNC={path} not found\", path,\n )\n\n with path.open() as infile:\n return json.load(infile)" ]
[ "0.63088006", "0.62734544", "0.60392916", "0.59869903", "0.58425516", "0.5792564", "0.57058764", "0.56768847", "0.56603354", "0.5657189", "0.56550276", "0.56537586", "0.5621684", "0.5609595", "0.5587608", "0.5586826", "0.55499035", "0.5522914", "0.5517707", "0.5516953", "0.54643613", "0.5454913", "0.5450706", "0.54503214", "0.54438996", "0.54210806", "0.5406843", "0.53966624", "0.5388618", "0.5362996", "0.53622496", "0.53582424", "0.5355628", "0.53292125", "0.53231055", "0.53092235", "0.5309194", "0.5309194", "0.52994686", "0.5277331", "0.5270361", "0.52660865", "0.52529764", "0.5250644", "0.52307373", "0.52166486", "0.52093077", "0.52065486", "0.5195647", "0.5190693", "0.5184902", "0.5177622", "0.5176367", "0.51711714", "0.51653516", "0.515231", "0.51410735", "0.51336545", "0.51318675", "0.5115056", "0.5095313", "0.5091621", "0.50902283", "0.5087503", "0.50774425", "0.5065352", "0.50650245", "0.5061337", "0.50607544", "0.50571084", "0.50562197", "0.5043483", "0.5043483", "0.5032558", "0.50229585", "0.5014376", "0.5013593", "0.50049293", "0.50023746", "0.49993458", "0.4995882", "0.49937427", "0.4992353", "0.49916112", "0.49908897", "0.49897516", "0.4981871", "0.49803194", "0.49793088", "0.49752516", "0.49712574", "0.49667016", "0.49617153", "0.49568915", "0.49473017", "0.49469164", "0.49405694", "0.49391714", "0.4936421", "0.49341503" ]
0.7140976
0
Expose unique instrument classes found in config
def _expose(self) -> None:
    classes = {instrument.__class__ for instrument in self._config["instruments"]}
    for class_ in classes:
        pyro.expose(class_)
    logger.success(f"Exposed {len(classes)} instrument class(es): {classes}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _config_classes(self):\n pass", "def config(self) -> InstrumentConfig:\n ...", "def register_instrument(instrument):\n config.INSTRUMENTS.append(instrument.id)\n for m in instrument.modules:\n register_module(m)", "def _instrument_class(self, cls):\n newcls = type('InstrumentedClass', (cls, MapRedBase), {})\n return newcls", "def instrument_type(self):\n \n raise NotImplementedError()", "def register_config(cls):\n _configs[cls.__name__] = cls", "def __init__(self, configGroups):\r\n self.config = {cls:configGroup[classes] for configGroup in configGroups for classes in configGroup for cls in IterWrapper(classes)}", "def register_classes():\n AnalyzeExtension.register_class()\n AnalyzeExtension_SG.register_class()", "def classify_instrument(self):\n print(self._identity)\n if self._identity in multimeters:\n print('Instrument in multimeter list')\n return Multimeter.from_serial_instrument(self)\n elif self._identity in function_generators:\n print('Instrument in function generator list')\n return FunctionGenerator.from_serial_instrument(self)\n elif self._identity in power_supplies:\n print('Instrument in power supply list')\n return PowerSupply.from_serial_instrument(self)\n else:\n return None", "def _instrument(self, **kwargs):\n klasses = get_base_estimators(packages=self.packages)\n attributes = kwargs.get(\"attributes\")\n for _, klass in klasses.items():\n if issubclass(klass, self.exclude_classes):\n logger.debug(\"Not instrumenting (excluded): %s\", str(klass))\n else:\n logger.debug(\"Instrumenting: %s\", str(klass))\n for method_name in self.methods:\n if hasattr(klass, method_name):\n self._instrument_class_method(\n estimator=klass,\n method_name=method_name,\n attributes=attributes,\n )", "def instr_dict():\n out = base_dict()\n out['mro']['current'] = ['Instrument']\n ao(out, 'nSamples', 'Integer', 1, 'Number of samples', readLevel=3)\n ao(out, 'devices', 'List', attr=['Hidden'])\n ao(out, 'initTest', 'Progress', attr=['Hidden'])\n ao(out, 'closingTest', 'Progress', attr=['Hidden'])\n return out", "def _serve(self) -> None:\n for instrument in self._config[\"instruments\"]:\n uri = self._daemon.register(instrument, objectId=str(instrument))\n self._services[instrument.id] = str(uri)\n logger.success(f\"Registered {instrument} at {uri}\")\n self.uri = self._daemon.register(self, objectId=self.servername)\n logger.success(f\"Registered self at {self.uri}\")", "def test_instrument(self, nexus_base):\n assert isinstance(nexus_base.instrument, nx.NXinstrument)", "def identify_class(self, cls):", "def account_instruments(self, instruments: Instruments = sentinel):\n pass", "def test_no_double_configuration(self):\n class A(pyperry.Base):\n def _config(cls):\n cls.add_processor('read', 'some processor')\n self.assertEqual(len(A.adapter_config['read']['_processors']), 1)\n\n class B(A): pass\n self.assertEqual(len(B.adapter_config['read']['_processors']), 1)", "def sensor_classes(self):\n raise NotImplementedError", "def setup_class(klass):", "def setup_class(klass):", "def add_instrument(self, mount, instrument):\n pass", "def get_cls_dict(config_path):\n return {i: n for i, n in enumerate(get_names(config_path))}", "def post_instrument_class(self, mapper):\n pass", "def register_source(klass):\n EVENT_SOURCES[klass.__name__] = klass", "def _register_outliner_classes(self):\n\n if not self._project:\n LOGGER.warning('Impossible to register outliner classes because Artella project is not defined!')\n return False\n\n outliners_data = self._config.get('outliners', 
default=dict())\n if not outliners_data:\n LOGGER.warning('No outliners found in artellapipe-tools-outliner configuration file to register!')\n return\n\n for outliner_type, outliner_info in outliners_data.items():\n full_outliner_class = outliner_info.get('class', None)\n if not full_outliner_class:\n LOGGER.warning('No class defined for Outliner Type \"{}\". Skipping ...'.format(outliner_type))\n continue\n outliner_class_split = full_outliner_class.split('.')\n outliner_class = outliner_class_split[-1]\n outliner_name = outliner_info.get('name', outliner_class)\n outliner_categories = outliner_info.get('categories', list())\n outliner_module = '.'.join(outliner_class_split[:-1])\n LOGGER.info('Registering Outliner: {}'.format(outliner_module))\n\n try:\n module_loader = loader.find_loader(outliner_module)\n except Exception as exc:\n LOGGER.warning('Impossible to register Outliner Module: {} | {}'.format(outliner_module, exc))\n continue\n if not module_loader:\n LOGGER.warning('Impossible to load Outliner Module: {}'.format(outliner_module))\n continue\n\n class_found = None\n try:\n mod = importlib.import_module(module_loader.fullname)\n except Exception as exc:\n LOGGER.warning('Impossible to register outliner class: {} | {}'.format(module_loader.fullname, exc))\n continue\n\n for cname, obj in inspect.getmembers(mod, inspect.isclass):\n if cname == outliner_class:\n class_found = obj\n break\n\n if not class_found:\n LOGGER.warning('No Outliner Class \"{}\" found in Module: \"{}\"'.format(outliner_class, outliner_module))\n continue\n\n obj.NAME = outliner_name\n obj.CATEGORIES = outliner_categories\n\n self.register_outliner_class(outliner_type, obj)\n\n return True", "def __init__(self):\n self.classes = {}", "def instruments_dict(self): # TODO DEPRECATE\n return self.instruments.dict", "def instruments(self):\r\n return self.get_field('instrument')", "def test_unique_adapters(self):\n class Super(pyperry.Base): pass\n Super.configure('read', adapter=TestAdapter, conf='super')\n\n class Child(Super): pass\n Child.configure('read', adapter=TestAdapter, conf='child')\n\n super_adapter = Super.adapter('read')\n child_adapter = Child.adapter('read')\n\n self.assertTrue(super_adapter is not child_adapter)\n self.assertEqual(super_adapter.config.conf, 'super')\n self.assertEqual(child_adapter.config.conf, 'child')", "def instruments(self) -> dict:\n return self._instruments", "def register_classes():\n DiffuseCompChain.register_class()\n CatalogCompChain.register_class()\n DiffuseAnalysisChain.register_class()", "def iter_spider_classes(module):\n ...", "def new_instrument(self, entry=\"entry\", instrument_name=\"id00\",):\n if not isinstance(entry, h5py.Group):\n entry = self.new_entry(entry)\n return self.new_class(entry, instrument_name, \"NXinstrument\")", "def _record_unpatched_classes():\n # type: () -> Dict[str, type]\n installed_packages = _get_installed_modules()\n\n original_classes = {}\n\n for package, orig_path in CLASSES_TO_INSTRUMENT.items():\n if package in installed_packages:\n try:\n original_cls = _import_by_path(orig_path)\n except (AttributeError, ImportError):\n logger.debug(\"[OTel] Failed to import %s\", orig_path)\n continue\n\n original_classes[package] = original_cls\n\n return original_classes", "def __init__(self, instrument):\n endpoint = self.ENDPOINT.format(instrument=instrument)\n super(Instruments, self).__init__(endpoint, method=self.METHOD)", "def register_classes():\n CollectLimits.register_class()\n CollectLimits_SG.register_class()\n 
CollectStackedLimits_SG.register_class()", "def new_instrument(self, instrument_type):\r\n return self.instrument_list[instrument_type](instrument_type,\r\n self.midi_output)", "def process_class_list(self, module, classes):", "def register_classes():\n CoaddSplit.register_class()\n CoaddSplit_SG.register_class()", "def register(cls, class_):\n cls._registered[class_.tag()] = class_", "def setup_class(cls):\n cls.expected_custom_component_configuration = dict(foo=\"bar\")\n\n cls.skill_config = SkillConfig(\n name=\"skill_name\",\n author=\"author\",\n version=\"0.1.0\",\n )\n\n cls.skill_config.protocols = {cls.old_protocol_id}\n cls.skill_config.contracts = {cls.old_contract_id}\n cls.skill_config.connections = {cls.old_connection_id}\n cls.skill_config.skills = {cls.old_skill_id}\n\n replace_component_ids(cls.skill_config, cls.replacements)", "def on_register(cls):", "def configs(self):\n raise NotImplementedError()", "def instrument_info(self):\n return self._decorator_wrapper(EventName.instrument_info)", "def __init__(self,\n instruments: Optional[Union[Instrument, Iterable[Instrument], dict]] = (),\n name: Optional[str] = None):\n super().__init__()\n if isinstance(instruments, dict):\n inst_list = []\n for k, v in instruments.items():\n v.name = k\n inst_list.append(v)\n self.instruments = inst_list\n else:\n self.instruments = instruments\n\n self.name = name", "def setup_class(cls):", "def setup_class(cls):", "def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")", "def test_configure_without_inheritance(self):\n\n @Configurable(conf=category('TEST', Parameter('test', value=True)))\n class BaseTest(object):\n \"\"\"base Class to configure.\"\"\"\n\n class Test(BaseTest):\n \"\"\"Class to configure.\"\"\"\n\n targets = Test()\n\n self.assertTrue(targets.test)", "def test_register_manual_keys(self):\n registry = ClassRegistry()\n\n @registry.register('fire')\n class Charizard(Pokemon):\n pass\n\n @registry.register('water')\n class Blastoise(Pokemon):\n pass\n\n # By default, you have to specify a registry key when\n # registering new classes. 
We'll see how to assign\n # registry keys automatically in the next test.\n with self.assertRaises(ValueError):\n # noinspection PyUnusedLocal\n @registry.register\n class Venusaur(Pokemon):\n pass\n\n self.assertIsInstance(registry['fire'], Charizard)\n self.assertIsInstance(registry['water'], Blastoise)", "def includeme(cls, config):\n\n allow, on, after = unpack.directives(config)\n config.add_engine_resource(model.Model, model.IContainer)\n s.register('CREATED', 'STARTED',)\n a.register('START',)\n\n IModel = model.IModel\n allow(IModel, a.START, (s.CREATED), s.STARTED)\n allow(IModel, a.START, (s.CREATED), s.STARTED)", "def config_mapping(self) -> typing.Dict[str, type]:\n return self._subclasses", "def register(self, klass):\n if klass not in self.extensions:\n self.extensions.append(klass)", "def class_names(self):\n raise NotImplementedError", "def setInstrument(self,instrument):\n self.instrument = instrument\n self.instrument.attach(self)", "def _load(self) -> list[Instrument]:\n logger.info(\"Loading config...\")\n self._config = yml.load(self.configpath)\n instruments, modespec = self._config[\"instruments\"], self._config[\"modes\"]\n logger.success(f\"Found {len(instruments)} instruments, {len(modespec)} modes\")", "def class_exts(cls):\n return set()", "def _dump_registry(cls, file=None):\r\n print >> file, \"Class: %s.%s\" % (cls.__module__, cls.__name__)\r\n print >> file, \"Inv.counter: %s\" % ABCMeta._abc_invalidation_counter\r\n for name in sorted(cls.__dict__.keys()):\r\n if name.startswith(\"_abc_\"):\r\n value = getattr(cls, name)\r\n print >> file, \"%s: %r\" % (name, value)", "def __init__(self):\n self._inst = {}", "def addClassToPickleWhitelist(cls):\n unpickleWhitelist_.add(cls)", "def __init__(self):\n for base in AutomationSetup.__bases__:\n base.__init__(self)", "def test_register_detect_keys(self):\n registry = ClassRegistry(attr_name='element')\n\n @registry.register\n class Charizard(Pokemon):\n element = 'fire'\n\n @registry.register\n class Blastoise(Pokemon):\n element = 'water'\n\n # You can still override the registry key if you want.\n @registry.register('poison')\n class Venusaur(Pokemon):\n element = 'grass'\n\n self.assertIsInstance(registry['fire'], Charizard)\n self.assertIsInstance(registry['water'], Blastoise)\n self.assertIsInstance(registry['poison'], Venusaur)\n\n # We overrode the registry key for this class.\n with self.assertRaises(RegistryKeyError):\n # noinspection PyStatementEffect\n registry['grass']", "def _record_usage(self, config):\n record_extra_usage_tag(TagKey.RLLIB_FRAMEWORK, config[\"framework\"])\n record_extra_usage_tag(TagKey.RLLIB_NUM_WORKERS, str(config[\"num_workers\"]))\n alg = self.__class__.__name__\n # We do not want to collect user defined algorithm names.\n if alg not in ALL_ALGORITHMS:\n alg = \"USER_DEFINED\"\n record_extra_usage_tag(TagKey.RLLIB_ALGORITHM, alg)", "def store(self, config_instance):\r\n pass", "def instrument(self, instrument):\n\n self._instrument = instrument", "def instrument(self, instrument):\n\n self._instrument = instrument", "def instrument(self, instrument):\n\n self._instrument = instrument", "def instrument(self, instrument):\n\n self._instrument = instrument", "def setup_class(self):\n pass", "def config(self):\n raise NotImplementedError", "def __init__(self, is_low_memory=False):\n self.is_low_memory = is_low_memory\n if is_low_memory:\n test_suffix = 'ForLowMemoryDevice'\n else:\n test_suffix = 'ForRegularDevice'\n class_name = self.__class__.__name__\n self.qualified_name = 
'%s.%s' % (class_name, test_suffix)\n self.tagged_name = self.qualified_name", "def register_class(cls):\n if cls is RegisteredType:\n raise \"Please do _not_ register RegisteredType!\"\n \n cid = RegisteredType._reg[autoid]\n RegisteredType._reg['classes'][cls] = cid\n RegisteredType._reg['classids'][cid] = cls\n RegisteredType._reg['autoid'] += 1", "def simulator_from_instrument(instrument):\r\n\r\n grid = grid_from_instrument(instrument=instrument)\r\n psf = psf_from_instrument(instrument=instrument)\r\n\r\n if instrument in \"vro\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=100.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"euclid\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2260.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst_up\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"ao\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=1000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n else:\r\n raise ValueError(\"An invalid instrument was entered - \", instrument)", "def set_capture_events_from_config(self):\n\n event_config = [\n {\n \"config_key\": \"events_watchlist\",\n \"events\": [\n \"watchlist.hit.process\",\n \"watchlist.hit.binary\",\n \"watchlist.storage.hit.process\",\n \"watchlist.storage.hit.binary\"\n ],\n \"options\": self.forwarder_options.get(\"wlhitnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_feed\",\n \"events\": [\n \"feed.ingress.hit.process\",\n \"feed.ingress.hit.binary\",\n \"feed.ingress.hit.host\",\n \"feed.storage.hit.process\",\n \"feed.storage.hit.binary\",\n \"feed.query.hit.process\",\n \"feed.query.hit.binary\"\n ],\n \"options\": self.forwarder_options.get(\"feedhitnotif\", \"0\")\n },\n {\n \"config_key\": \"events_alert\",\n \"events\": [\n \"alert.watchlist.hit.ingress.process\",\n \"alert.watchlist.hit.ingress.binary\",\n \"alert.watchlist.hit.ingress.host\",\n \"alert.watchlist.hit.query.process\",\n \"alert.watchlist.hit.query.binary\"\n ],\n \"options\": self.forwarder_options.get(\"alertnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_raw_sensor\",\n \"events\": [\n \"ingress.event.process\",\n \"ingress.event.procstart\",\n \"ingress.event.netconn\",\n \"ingress.event.procend\",\n \"ingress.event.childproc\",\n \"ingress.event.moduleload\",\n \"ingress.event.module\",\n \"ingress.event.filemod\",\n \"ingress.event.regmod\"\n \t\"ingress.event.tamper\",\n 
\t\t\"ingress.event.crossprocopen\",\n \t\t\"ingress.event.remotethread\",\n \t\t\"ingress.event.processblock\",\n \t\t\"ingress.event.emetmitigation\",\n ],\n \"options\": self.forwarder_options.get(\"rawsensnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_binary_observed\",\n \"events\": [\"binaryinfo.host.observed\",\n \"binaryinfo.observed,\"\n \"binaryinfo.group.observed\"],\n\n \"options\": self.forwarder_options.get(\"binobsnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_binary_upload\",\n \"events\": [\"binarystore.file.added\"],\n \"options\": self.forwarder_options.get(\"binuplnotifenabled\", \"0\")\n }\n ]\n\n self.capture_events = []\n for event_type in event_config:\n events = self.forwarder_options.get(event_type[\"config_key\"], \"0\").lower()\n if events == \"all\":\n self.capture_events.extend(event_type[\"events\"])\n elif events != \"0\":\n events_from_config = events.split(\",\")\n events_to_capture = list(set(events_from_config) & set(event_type[\"events\"]))\n self.capture_events.extend(events_to_capture)\n\n self.logger.info(\"Configured to capture events: %s\" % self.capture_events)", "def test_exclude_include_overlapping_ambiguous_and_includes_excluded_init_overridden_file_line():\n\n class iitem(ConfigItem):\n def __init__(self, mc_include, mc_exclude):\n super().__init__(mc_include=mc_include, mc_exclude=mc_exclude)\n\n with raises(ConfigException) as exinfo:\n class X():\n def __init__(self):\n iitem(mc_exclude=[dev2], mc_include=[dev2, prod])\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with root(aa=1):\n X()\n\n exp = \"Env('dev2') is specified in both include and exclude, with no single most specific group or direct env:\"\n assert exp in str(exinfo.value)", "def from_config(cls, config: Dict[str, Any]) -> \"ClassyLoss\":\n raise NotImplementedError()", "def extension(klass):\n registry.register(klass)\n return klass", "def __init__(self, cfg, tops_type=[3, 5, 10]):\n\n attr_cloth_file = open(cfg.attr_cloth_file).readlines()\n self.attr_idx2name = {}\n for i, line in enumerate(attr_cloth_file[2:]):\n self.attr_idx2name[i] = line.strip('\\n').split()[0]", "def instrumentName(self, instrumentName):\n\n self._instrumentName = instrumentName", "def includeme(cls, config):\n\n allow, on, after = unpack.directives(config)\n config.add_engine_resource(model.Model, model.IContainer)\n config.add_engine_resource(model.Foo, model.IFooContainer)\n s.register(\n 'CREATED',\n 'DRAFTED',\n 'PUBLISHED',\n 'PENDING_MODERATION',\n )\n a.register(\n 'DRAFT',\n 'PUBLISH',\n 'APPROVE',\n 'POKE',\n )\n\n allow(model.IModel, a.DRAFT, (s.CREATED), s.DRAFTED)\n allow(model.IModel, a.PUBLISH, (s.DRAFTED), s.PUBLISHED)\n allow(model.IFoo, a.PUBLISH, (s.DRAFTED), s.PENDING_MODERATION)\n allow(model.IFoo, a.APPROVE, (s.PENDING_MODERATION), s.PUBLISHED)\n allow(model.IFoo, a.POKE, '*', Ellipsis)", "def register_driver(self, key, cls):\n self.drivers.update({key: cls})", "def test_unique_keys(self):\n registry = ClassRegistry(attr_name='element', unique=True)\n\n # We can register any class like normal...\n # noinspection PyUnusedLocal\n registry.register(Charmander)\n\n # ... 
but if we try to register a second class with the same\n # key, we get an error.\n with self.assertRaises(RegistryKeyError):\n registry.register(Charmeleon)", "def __init__(self):\r\n self.addons = {}", "def register_analyzer(cls, analyzer_class):\n analyzer_name = analyzer_class.NAME.lower()\n if analyzer_name in cls._class_registry:\n raise KeyError('Class already set for name: {0:s}.'.format(\n analyzer_class.NAME))\n cls._class_registry[analyzer_name] = analyzer_class", "def configurables(cls):\n return find_class_instances(cls, Configurable)", "def _retrieve_instrument_configurations(self, instrumentConfigurationList_elm):\n \n for instrumentConfiguration_elm in instrumentConfigurationList_elm.findall(self._prefix+'instrumentConfiguration'):\n \n # retrieve configuration id\n config_id = instrumentConfiguration_elm.get('id', None)\n if not config_id:\n continue\n \n # init buffer\n self._instrument_configs[config_id] = {\n 'ionization_source': None,\n 'mass_analyzer': None}\n \n # retrieve all cv params\n for cvParam_elm in instrumentConfiguration_elm.iter(self._prefix+'cvParam'):\n key, value = self._parse_cv_param(cvParam_elm, 'instrument')\n if key:\n self._instrument_configs[config_id][key] = value", "def test_saves_config(self):\n class Test(pyperry.Base): pass\n Test.add_middleware('read', self.Middle, { 'foo': 'bar' })\n self.assertEqual(Test.adapter_config['read']['_middlewares'],\n [(self.Middle, { 'foo': 'bar' })])\n Test.add_middleware('read', self.Middle, { 'baz': 'boo' })\n self.assertEqual(Test.adapter_config['read']['_middlewares'],\n [\n (self.Middle, { 'foo': 'bar' }),\n (self.Middle, { 'baz': 'boo' }) ])", "def __determine_config_type():", "def register_aliases(self, aliases, plugin_class):\n for alias in aliases:\n self.plugins[alias] = plugin_class", "def loadDrivers(self):\n\n self.sources = {}\n for source in self.config['sources']:\n sourceConf = self.config['sources'][source]\n baseClass = sourceConf['baseClass']\n self.logger.debug(\"Loading: \" + source +\n \" instance of: \" + baseClass)\n sourceArgs = sourceConf['source-config']\n self.sources[source] = {}\n try:\n print(baseClass)\n tempModule = import_module('sources.' + baseClass)\n \"\"\"tempModule = __import__('sources.' 
+ baseClass,\n globals(), locals(), [baseClass], -1)\n \"\"\"\n self.sources[source]['source'] = getattr(tempModule, str(\n baseClass))(sourceArgs)\n except Exception as e:\n self.logger.error(\"exception: \" + str(e))\n return None", "def __init__(self, name, config, handlers):\r\n # Get Class Collector config\r\n try:\r\n class_config = config['collectors']['CMDCollector']\r\n except KeyError:\r\n class_config = None\r\n super(CMDCollector, self).__init__(name, config, handlers)\r\n if class_config:\r\n self.config.merge(class_config)\r\n\r\n # vars = self.config['env_vars']\r\n # if not isinstance(vars, list):\r\n # vars = vars.split()\r\n # for var in vars:\r\n # key, param = var.split(':')\r\n # os.putenv(key, self.config[param])\r", "def __init__(self, cfgOb, **kwargs):\n self.__cfgOb = cfgOb\n\n self.__configName = kwargs.get(\"configName\", self.__cfgOb.getDefaultSectionName())\n self.__cachePath = kwargs.get(\"cachePath\", \".\")\n #\n self.__drugBankMappingDict = {}\n self.__csdModelMappingDict = {}\n self.__taxU = None\n self.__ecU = None\n self.__scopU = None\n self.__cathU = None\n self.__dbU = None\n self.__residU = None\n self.__psimodU = None\n self.__ccU = None\n self.__ccmU = None\n self.__commonU = None\n self.__dApiW = None\n self.__atcP = None\n # self.__siftsAbbreviated = kwargs.get(\"siftsAbbreviated\", \"PROD\")\n self.__siftsAbbreviated = kwargs.get(\"siftsAbbreviated\", \"TEST\")\n self.__ssP = None\n self.__vrptP = None\n self.__crP = None\n self.__jtaP = None\n self.__pcP = None\n self.__phP = None\n #\n #\n # self.__wsPattern = re.compile(r\"\\s+\", flags=re.UNICODE | re.MULTILINE)\n # self.__re_non_digit = re.compile(r\"[^\\d]+\")\n #\n self.__resourcesD = {\n \"SiftsSummaryProvider instance\": self.__fetchSiftsSummaryProvider,\n \"Dictionary API instance (pdbx_core)\": self.__fetchDictionaryApi,\n \"TaxonomyProvider instance\": self.__fetchTaxonomyProvider,\n \"ScopProvider instance\": self.__fetchScopProvider,\n \"CathProvider instance\": self.__fetchCathProvider,\n \"EnzymeProvider instance\": self.__fetchEnzymeProvider,\n \"DrugBankProvider instance\": self.__fetchDrugBankProvider,\n \"ResidProvider instance\": self.__fetchResidProvider,\n \"PsiModProvider instance\": self.__fetchPsiModProvider,\n \"ChemCompModelProvider instance\": self.__fetchChemCompModelProvider,\n \"ChemCompProvider instance\": self.__fetchChemCompProvider,\n \"AtcProvider instance\": self.__fetchAtcProvider,\n \"DictMethodCommonUtils instance\": self.__fetchCommonUtils,\n \"ValidationProvider instance\": self.__fetchValidationProvider,\n \"CitationReferenceProvider instance\": self.__fetchCitationReferenceProvider,\n \"JournalTitleAbbreviationProvider instance\": self.__fetchJournalTitleAbbreviationProvider,\n \"PubChemProvider instance\": self.__fetchPubChemProvider,\n \"PharosProvider instance\": self.__fetchPharosProvider,\n }\n logger.debug(\"Dictionary resource provider init completed\")\n #", "def _load_apis(self):\n cannabis_reports = __import__('cannabis_reports.apis')\n for class_name in cannabis_reports.apis.__all__:\n if not class_name.startswith('_'):\n cls = getattr(cannabis_reports.apis, class_name)\n api = AuthProxy(self.session, cls)\n setattr(self, class_name, api)\n self.__apis__[class_name] = api", "def cmdconf_type(command_name):\n def decorator(cls):\n \"\"\"\"\"\"\n CMDCONF_TYPES[command_name] = cls\n return cls\n\n return decorator", "def config(cls) -> HandlerConfig:\n MyType.clear_interning_cache()\n MyOtherType.clear_interning_cache()\n\n # Create the 
function to wrap.\n mock_function = mock.Mock()\n\n # Create fake unit classes.\n mock_from_unit = mock.MagicMock(spec=MyType.decorate(MyUnit))\n mock_from_unit.__name__ = \"MockFromUnit\"\n mock_to_unit = mock.MagicMock(spec=MyOtherType.decorate(MyUnit))\n mock_to_unit.__name__ = \"MockToUnit\"\n\n # Make it look like the two units are not compatible.\n mock_to_unit.is_compatible.return_value = False\n mock_from_unit.is_compatible.return_value = False\n\n # Create the wrapper instance.\n wrapper_class = CastHandler(mock_from_unit, mock_to_unit)\n # Wrap the function.\n wrapped_handler = wrapper_class(mock_function)\n\n return cls.HandlerConfig(handler=wrapped_handler,\n mock_function=mock_function,\n wrapper_class=wrapper_class,\n mock_from_unit=mock_from_unit,\n mock_to_unit=mock_to_unit)", "def setUpClass(cls):\n cls._no_default = imageroller.test.write_config(\n \"config\", CONFIG_NO_DEFAULT, CONFIG_DATA)\n cls._no_workers = imageroller.test.write_config(\n \"config\", CONFIG_NO_WORKERS, CONFIG_DATA)\n cls._zero_workers = imageroller.test.write_config(\n \"config\", CONFIG_ZERO_WORKERS, CONFIG_DATA)\n cls._no_server = imageroller.test.write_config(\n \"config\", CONFIG_NO_SERVER, CONFIG_DATA)\n cls._server_no_save_timeout = imageroller.test.write_config(\n \"config\", CONFIG_SERVER_NO_SAVE_TIMEOUT, CONFIG_DATA)\n cls._server_no_retain_image = imageroller.test.write_config(\n \"config\", CONFIG_SERVER_NO_RETAIN_IMAGE, CONFIG_DATA)\n cls._server_no_region = imageroller.test.write_config(\n \"config\", CONFIG_SERVER_NO_REGION, CONFIG_DATA)\n cls._server_valid_minimal = imageroller.test.write_config(\n \"config\", CONFIG_SERVER_VALID_MINIMAL, CONFIG_DATA)\n cls._server_valid_override = imageroller.test.write_config(\n \"config\", CONFIG_SERVER_VALID_OVERRIDE, CONFIG_DATA)", "def __init__(self, app: Sanic):\n self.configurations = app.config\n\n Configs.__instance = self", "def models():\n # Do not include SingleAttacker as an available model, just get users to pass\n # an attacker model directly instead.\n return [cls for cls in AttackerConfiguration.__subclasses__() if cls != SingleAttacker] # pylint: disable=no-member", "def test_class_annotations():\n\n for cls in get_module_classes('HABApp.core.events.events', ('ComplexEventValue', 'AllEvents')).values():\n check_class_annotations(cls)", "def _make_custom_config(name: str = \"dummy_agent\", skills_num: int = 1) -> dict:\n # noqa\n def _make_skill(id_: int) -> Skill:\n return AEATestWrapper.make_skill(\n config=SkillConfig(name=f\"sc{id_}\", author=\"fetchai\"),\n handlers={\"dummy_handler\": DummyHandler},\n )\n\n return {\n \"name\": name,\n \"components\": [_make_skill(i) for i in range(skills_num)],\n }", "def __init__(self):\n self.config = {}" ]
[ "0.6167917", "0.6165286", "0.6108895", "0.5712228", "0.56472594", "0.5590002", "0.54058063", "0.5276742", "0.5238212", "0.517786", "0.5136025", "0.5066814", "0.5047145", "0.5043953", "0.5038126", "0.50306547", "0.502609", "0.5012202", "0.5012202", "0.50089884", "0.49899292", "0.4988904", "0.49703974", "0.49652734", "0.495198", "0.49453932", "0.4932651", "0.49235672", "0.49103412", "0.4871281", "0.48573828", "0.48495117", "0.48467982", "0.48428056", "0.48242524", "0.48175856", "0.4802544", "0.4799283", "0.47808152", "0.47674897", "0.4763415", "0.4755723", "0.47464335", "0.47365233", "0.4734509", "0.4734509", "0.47307032", "0.47180614", "0.47083038", "0.47016388", "0.46773022", "0.46762133", "0.4675537", "0.4670427", "0.46524867", "0.46434402", "0.4640116", "0.46190614", "0.4613064", "0.46101314", "0.45939806", "0.45840034", "0.4577545", "0.45705268", "0.45705268", "0.45705268", "0.45705268", "0.45649192", "0.45553932", "0.45514965", "0.45467478", "0.45465598", "0.4544784", "0.45436049", "0.4542905", "0.45423603", "0.45412582", "0.45345908", "0.45332304", "0.45292166", "0.4527797", "0.45178148", "0.45141166", "0.4512948", "0.45057735", "0.45031458", "0.45010033", "0.44995612", "0.44938964", "0.4489653", "0.4488627", "0.4484005", "0.4482499", "0.4482196", "0.4476338", "0.44726887", "0.44711435", "0.44674808", "0.44655883", "0.4462492" ]
0.66089696
0
Register instrument instances and self with daemon and storing uris
def _serve(self) -> None:
    for instrument in self._config["instruments"]:
        uri = self._daemon.register(instrument, objectId=str(instrument))
        self._services[instrument.id] = str(uri)
        logger.success(f"Registered {instrument} at {uri}")
    self.uri = self._daemon.register(self, objectId=self.servername)
    logger.success(f"Registered self at {self.uri}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_instrument(instrument):\n config.INSTRUMENTS.append(instrument.id)\n for m in instrument.modules:\n register_module(m)", "def register():\n signals.initialized.connect(initialize)\n signals.article_generator_context.connect(add_libravatar)", "def on_register(cls):", "def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. Name {} and uri {} \".format(self._name,uri))", "def __init__(self, instrument):\n endpoint = self.ENDPOINT.format(instrument=instrument)\n super(Instruments, self).__init__(endpoint, method=self.METHOD)", "def register(self):\n self._register_dockyard()\n self._register_docker()", "def add_instrument(self, mount, instrument):\n pass", "def initiate(self):\n\n for item in config.WEATHER_PROVIDERS[self.title]:\n self.__setattr__(item, config.WEATHER_PROVIDERS[self.title][item])\n\n # RP5 and Sinoptik have same URLs for hourly and next day weather info\n if self.title in ('RP5', 'Sinoptik'):\n self.URL_hourly = self.URL\n self.URL_next_day = self.URL\n\n self.logger = self._get_logger(self.title, self.app.args.verbosity)", "def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])", "def register_publisher(self, hostname, expire=-1):", "def register(self, target, hostname, listener_type, expire=-1):", "def setInstrument(self,instrument):\n self.instrument = instrument\n self.instrument.attach(self)", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def __init__(self):\n self._inst = {}", "def __init__(self, udisks):\n self.log = logging.getLogger('udiskie.daemon.Daemon')\n self.state = {}\n self.udisks = udisks\n\n self.event_handlers = {\n 'device_added': [],\n 'device_removed': [],\n 'device_mounted': [],\n 'device_unmounted': [],\n 'media_added': [],\n 'media_removed': [],\n 'device_unlocked': [],\n 'device_locked': [],\n 'device_changed': [self.on_device_changed]\n }\n\n for device in self.udisks.get_all_handleable():\n self._store_device_state(device)\n\n udisks.bus.add_signal_receiver(\n self._device_added,\n signal_name='DeviceAdded',\n bus_name='org.freedesktop.UDisks')\n udisks.bus.add_signal_receiver(\n self._device_removed,\n signal_name='DeviceRemoved',\n bus_name='org.freedesktop.UDisks')\n udisks.bus.add_signal_receiver(\n self._device_changed,\n signal_name='DeviceChanged',\n bus_name='org.freedesktop.UDisks')", "def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)", "def __init__(self):\n self._registry = {}", "def async_register_services(hass, config, insteon_modem):\n\n def add_all_link(service):\n \"\"\"Add an INSTEON All-Link between two devices.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n mode = service.data.get(SRV_ALL_LINK_MODE)\n link_mode = 1 if mode.lower() == SRV_CONTROLLER else 0\n insteon_modem.start_all_linking(link_mode, group)\n\n def del_all_link(service):\n \"\"\"Delete an INSTEON All-Link between two devices.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.start_all_linking(255, group)\n\n def load_aldb(service):\n \"\"\"Load the device All-Link database.\"\"\"\n entity_id = service.data[CONF_ENTITY_ID]\n reload = 
service.data[SRV_LOAD_DB_RELOAD]\n if entity_id.lower() == ENTITY_MATCH_ALL:\n for entity_id in hass.data[DOMAIN][INSTEON_ENTITIES]:\n _send_load_aldb_signal(entity_id, reload)\n else:\n _send_load_aldb_signal(entity_id, reload)\n\n def _send_load_aldb_signal(entity_id, reload):\n \"\"\"Send the load All-Link database signal to INSTEON entity.\"\"\"\n signal = f\"{entity_id}_{SIGNAL_LOAD_ALDB}\"\n dispatcher_send(hass, signal, reload)\n\n def print_aldb(service):\n \"\"\"Print the All-Link Database for a device.\"\"\"\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n entity_id = service.data[CONF_ENTITY_ID]\n signal = f\"{entity_id}_{SIGNAL_PRINT_ALDB}\"\n dispatcher_send(hass, signal)\n\n def print_im_aldb(service):\n \"\"\"Print the All-Link Database for a device.\"\"\"\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n print_aldb_to_log(insteon_modem.aldb)\n\n def x10_all_units_off(service):\n \"\"\"Send the X10 All Units Off command.\"\"\"\n housecode = service.data.get(SRV_HOUSECODE)\n insteon_modem.x10_all_units_off(housecode)\n\n def x10_all_lights_off(service):\n \"\"\"Send the X10 All Lights Off command.\"\"\"\n housecode = service.data.get(SRV_HOUSECODE)\n insteon_modem.x10_all_lights_off(housecode)\n\n def x10_all_lights_on(service):\n \"\"\"Send the X10 All Lights On command.\"\"\"\n housecode = service.data.get(SRV_HOUSECODE)\n insteon_modem.x10_all_lights_on(housecode)\n\n def scene_on(service):\n \"\"\"Trigger an INSTEON scene ON.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.trigger_group_on(group)\n\n def scene_off(service):\n \"\"\"Trigger an INSTEON scene ON.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.trigger_group_off(group)\n\n hass.services.async_register(\n DOMAIN, SRV_ADD_ALL_LINK, add_all_link, schema=ADD_ALL_LINK_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_DEL_ALL_LINK, del_all_link, schema=DEL_ALL_LINK_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_LOAD_ALDB, load_aldb, schema=LOAD_ALDB_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_PRINT_ALDB, print_aldb, schema=PRINT_ALDB_SCHEMA\n )\n hass.services.async_register(DOMAIN, SRV_PRINT_IM_ALDB, print_im_aldb, schema=None)\n hass.services.async_register(\n DOMAIN, SRV_X10_ALL_UNITS_OFF, x10_all_units_off, schema=X10_HOUSECODE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_X10_ALL_LIGHTS_OFF, x10_all_lights_off, schema=X10_HOUSECODE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_X10_ALL_LIGHTS_ON, x10_all_lights_on, schema=X10_HOUSECODE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_SCENE_ON, scene_on, schema=TRIGGER_SCENE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_SCENE_OFF, scene_off, schema=TRIGGER_SCENE_SCHEMA\n )\n _LOGGER.debug(\"Insteon Services registered\")", "def __init__(self):\n self.registry = {}", "def __init__(self):\n self._discovered_devices = {}\n self._discovered_ip = None", "def __init__(self):\n dispatcher.connect(self.stats_spider_closed, signal=signals.stats_spider_closed)\n dispatcher.connect(self.stats_spider_closed, signal=signals.spider_closed)\n dispatcher.connect(self.stats_spider_closed, signal=signals.engine_stopped)", "def register(self):\n raise NotImplementedError()", "def register_router(self, hostname, expire=-1):", "def __init__(self, bootstrap_interval=None, run_migrations=True):\n super().__init__(bootstrap_interval, run_migrations)\n 
self.data_source_provider = RegistryDataSourceProvider()\n self.adapters = []\n self.adapters_by_domain = defaultdict(list)\n self.domains_to_skip = None", "def consul_register(self):\n self.log.debug(\"consul-register\")\n self.consul.agent.service.register(\n self.svc_name,\n address=self.this_host,\n check=consulate.models.agent.Check(\n name=\"qemu-process\",\n args=[\n \"/bin/sh\",\n \"-c\",\n \"test -e /proc/$(< /run/qemu.{}.pid )/mem || exit 2\".format(\n self.name\n ),\n ],\n interval=\"5s\",\n ),\n )", "def register(self, instance, storage_interface):\r\n self._instances[instance] = storage_interface", "def onRegister(self):\n pass", "def onRegister(self):\n pass", "def register(self):\n raise NotImplementedError(\"Should have implemented this\")", "def start(self):\r\n self._instagram_api = InstagramAPI(mongo_api=self._mongo_api)\r\n self._inst_run()", "def run(self, registry):", "def __init__(self) -> None:\n self._found_devices = {} # type: Dict[IPv4Address, conf.BaseService]", "def setUp(self):\n self.manager, self.proxy = tests.utils.setup_xmlrpc()\n self.proxy.provider.register(\n PROVIDER_ID, USERNAME, PASSWORD, URL, TENANT, PROVIDER_TYPE\n )", "def register(self):\n\n def nsr_id_from_keyspec(ks):\n nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)\n nsr_id = nsr_path_entry.key00.id\n return nsr_id\n\n def group_name_from_keyspec(ks):\n group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)\n group_name = group_path_entry.key00.scaling_group_name_ref\n return group_name\n\n def is_instance_in_reg_elements(nsr_id, group_name, instance_id):\n \"\"\" Return boolean indicating if scaling group instance was already commited previously.\n\n By looking at the existing elements in this registration handle (elements not part\n of this current xact), we can tell if the instance was configured previously without\n keeping any application state.\n \"\"\"\n for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(include_keyspec=True):\n elem_nsr_id = nsr_id_from_keyspec(keyspec)\n elem_group_name = group_name_from_keyspec(keyspec)\n\n if elem_nsr_id != nsr_id or group_name != elem_group_name:\n continue\n\n if instance_cfg.id == instance_id:\n return True\n\n return False\n\n def get_scale_group_instance_delta(nsr_id, group_name, xact):\n\n #1. Find all elements in the transaction add to the \"added\"\n #2. Find matching elements in current elements, remove from \"added\".\n #3. 
Find elements only in current, add to \"deleted\"\n\n xact_ids = set()\n for instance_cfg, keyspec in self._scale_regh.get_xact_elements(xact, include_keyspec=True):\n elem_nsr_id = nsr_id_from_keyspec(keyspec)\n if elem_nsr_id != nsr_id:\n continue\n\n elem_group_name = group_name_from_keyspec(keyspec)\n if elem_group_name != group_name:\n continue\n\n xact_ids.add(instance_cfg.id)\n\n current_ids = set()\n for instance_cfg, keyspec in self._scale_regh.get_xact_elements(include_keyspec=True):\n elem_nsr_id = nsr_id_from_keyspec(keyspec)\n if elem_nsr_id != nsr_id:\n continue\n\n elem_group_name = group_name_from_keyspec(keyspec)\n if elem_group_name != group_name:\n continue\n\n current_ids.add(instance_cfg.id)\n\n delta = {\n \"added\": xact_ids - current_ids,\n \"deleted\": current_ids - xact_ids\n }\n return delta\n\n def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):\n # Unforunately, it is currently difficult to figure out what has exactly\n # changed in this xact without Pbdelta support (RIFT-4916)\n # As a workaround, we can fetch the pre and post xact elements and\n # perform a comparison to figure out adds/deletes/updates\n xact_cfgs = list(dts_member_reg.get_xact_elements(xact))\n curr_cfgs = list(dts_member_reg.elements)\n\n xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}\n curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}\n\n # Find Adds\n added_keys = set(xact_key_map) - set(curr_key_map)\n added_cfgs = [xact_key_map[key] for key in added_keys]\n\n # Find Deletes\n deleted_keys = set(curr_key_map) - set(xact_key_map)\n deleted_cfgs = [curr_key_map[key] for key in deleted_keys]\n\n # Find Updates\n updated_keys = set(curr_key_map) & set(xact_key_map)\n updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]\n\n return added_cfgs, deleted_cfgs, updated_cfgs\n\n def on_apply(dts, acg, xact, action, scratch):\n \"\"\"Apply the configuration\"\"\"\n def handle_create_nsr(msg):\n # Handle create nsr requests \"\"\"\n # Do some validations\n if not msg.has_field(\"nsd_ref\"):\n err = \"NSD reference not provided\"\n self._log.error(err)\n raise NetworkServiceRecordError(err)\n\n self._log.info(\"Creating NetworkServiceRecord %s from nsd_id %s\",\n msg.id, msg.nsd_ref)\n\n #nsr = self.nsm.create_nsr(msg)\n return nsr\n\n def handle_delete_nsr(msg):\n @asyncio.coroutine\n def delete_instantiation(ns_id):\n \"\"\" Delete instantiation \"\"\"\n pass\n #with self._dts.transaction() as xact:\n #yield from self._nsm.terminate_ns(ns_id, xact)\n\n # Handle delete NSR requests\n self._log.info(\"Delete req for NSR Id: %s received\", msg.id)\n # Terminate the NSR instance\n #nsr = self._nsm.get_ns_by_nsr_id(msg.id)\n\n #nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD)\n #event_descr = \"Terminate rcvd for NS Id:%s\" % msg.id\n #nsr.record_event(\"terminate-rcvd\", event_descr)\n\n #self._loop.create_task(delete_instantiation(msg.id))\n\n @asyncio.coroutine\n def begin_instantiation(nsr):\n # Begin instantiation\n pass\n #self._log.info(\"Beginning NS instantiation: %s\", nsr.id)\n #yield from self._nsm.instantiate_ns(nsr.id, xact)\n\n self._log.debug(\"Got nsr apply (xact: %s) (action: %s)(scr: %s)\",\n xact, action, scratch)\n\n if action == rwdts.AppconfAction.INSTALL and xact.id is None:\n self._log.debug(\"No xact handle. 
Skipping apply config\")\n xact = None\n\n (added_msgs, deleted_msgs, updated_msgs) = get_add_delete_update_cfgs(self._nsr_regh, xact, \"id\")\n\n for msg in added_msgs:\n self._log.info(\"Create NSR received in on_apply to instantiate NS:%s\", msg.id)\n #if msg.id not in self._nsm.nsrs:\n # self._log.info(\"Create NSR received in on_apply to instantiate NS:%s\", msg.id)\n # nsr = handle_create_nsr(msg)\n # self._loop.create_task(begin_instantiation(nsr))\n\n for msg in deleted_msgs:\n self._log.info(\"Delete NSR received in on_apply to terminate NS:%s\", msg.id)\n try:\n handle_delete_nsr(msg)\n except Exception:\n self._log.exception(\"Failed to terminate NS:%s\", msg.id)\n\n for msg in updated_msgs:\n self._log.info(\"Update NSR received in on_apply to change scaling groups in NS:%s\", msg.id)\n\n for group in msg.scaling_group:\n instance_delta = get_scale_group_instance_delta(msg.id, group.scaling_group_name_ref, xact)\n self._log.debug(\"Got NSR:%s scale group instance delta: %s\", msg.id, instance_delta)\n\n #for instance_id in instance_delta[\"added\"]:\n # self._nsm.scale_nsr_out(msg.id, group.scaling_group_name_ref, instance_id, xact)\n\n #for instance_id in instance_delta[\"deleted\"]:\n # self._nsm.scale_nsr_in(msg.id, group.scaling_group_name_ref, instance_id)\n\n\n return RwTypes.RwStatus.SUCCESS\n\n @asyncio.coroutine\n def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):\n \"\"\" Prepare calllback from DTS for NSR \"\"\"\n\n xpath = ks_path.to_xpath(NsrYang.get_schema())\n action = xact_info.query_action\n self._log.debug(\n \"Got Nsr prepare callback (xact: %s) (action: %s) (info: %s), %s:%s)\",\n xact, action, xact_info, xpath, msg\n )\n\n fref = ProtobufC.FieldReference.alloc()\n fref.goto_whole_message(msg.to_pbcm())\n\n if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:\n pass\n # Ensure the Cloud account has been specified if this is an NSR create\n #if msg.id not in self._nsm.nsrs:\n # if not msg.has_field(\"cloud_account\"):\n # raise NsrInstantiationFailed(\"Cloud account not specified in NSR\")\n\n # We do not allow scaling actions to occur if the NS is not in running state\n #elif msg.has_field(\"scaling_group\"):\n # nsr = self._nsm.nsrs[msg.id]\n # if nsr.state != NetworkServiceRecordState.RUNNING:\n # raise ScalingOperationError(\"Unable to perform scaling action when NS is not in running state\")\n\n # if len(msg.scaling_group) > 1:\n # raise ScalingOperationError(\"Only a single scaling group can be configured at a time\")\n\n # for group_msg in msg.scaling_group:\n # num_new_group_instances = len(group_msg.instance)\n # if num_new_group_instances > 1:\n # raise ScalingOperationError(\"Only a single scaling instance can be created at a time\")\n\n # elif num_new_group_instances == 1:\n # scale_group = nsr.scaling_groups[group_msg.scaling_group_name_ref]\n # if len(scale_group.instances) == scale_group.max_instance_count:\n # raise ScalingOperationError(\"Max instances for %s reached\" % scale_group)\n\n\n acg.handle.prepare_complete_ok(xact_info.handle)\n\n\n self._log.debug(\"Registering for NSR config using xpath: %s\",\n NsrDtsHandler.NSR_XPATH)\n\n acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)\n with self._dts.appconf_group_create(handler=acg_hdl) as acg:\n self._nsr_regh = acg.register(xpath=NsrDtsHandler.NSR_XPATH,\n flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,\n on_prepare=on_prepare)\n\n self._scale_regh = acg.register(\n xpath=NsrDtsHandler.SCALE_INSTANCE_XPATH,\n 
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,\n )", "def save(self):\n try:\n root = ET.Element(\"registry\", attrib={\"save\":str(time.time())})\n root.append(ET.Comment(__WARNING_TEXT__))\n \n # Make sure we have the daemon's ID for next time we load!\n ET.SubElement(root, \"daemon\", attrib={\"id\":self._did})\n \n #Add attachments to the registry!\n attachments = ET.Element(\"attachments\")\n for attach in self._attachments.values():\n if attach.type in [PLUG, ALARM]:\n ET.SubElement(attachments, attach.type, attrib=attach.getAttrib())\n root.append(attachments)\n \n #Add all the events\n events = ET.Element(\"events\")\n for event in self._events.values():\n ET.SubElement(events, \"event\", attrib=event.getAttrib())\n root.append(events)\n \n #Add all registered alerts\n alerts = ET.Element(\"alerts\")\n for alert in self._alerts.values():\n ET.SubElement(alerts, \"alert\", attrib=alert.getAttrib())\n root.append(alerts)\n \n #Add all subscriptions\n subscriptions = ET.Element(\"subscriptions\")\n for subscription in self._subscriptions.values():\n ET.SubElement(subscriptions, \"sub\", attrib=subscription.getAttrib())\n root.append(subscriptions)\n \n # Save everything to the file that we were given on startup.\n logging.debug(\"Saving registry to %s\"%self._file)\n if self.__try_setup_path(self._file):\n self.__makeBackup()\n \n tree = ET.ElementTree(root)\n try: \n # First try to save via a byte stream, if that doesn't work \n # utilize the basic string save. which might not work.\n with open(self._file, \"wb\") as savefile:\n tree.write(savefile)\n except:\n with open(self._file, \"w\") as savefile:\n tree.write(savefile)\n # if the second attempt fails it is caught by the function\n # try-catch. Which will restore backups and log the errors.\n logging.debug(\"Registry Saved!\")\n self.__removeBackup()\n return True\n else: return False\n except Exception as e:\n logging.exception(e)\n self.__restoreBackup()\n return False", "def __init__(self, base_dir: str, inst_data: Optional[ConfigurableClassData] = None):\n self._base_dir = os.path.abspath(check.str_param(base_dir, \"base_dir\"))\n mkdir_p(self._base_dir)\n\n self._obs = None\n\n self._watchers = defaultdict(dict)\n self._inst_data = check.opt_inst_param(inst_data, \"inst_data\", ConfigurableClassData)\n\n # Used to ensure that each run ID attempts to initialize its DB the first time it connects,\n # ensuring that the database will be created if it doesn't exist\n self._initialized_dbs = set()\n\n # Ensure that multiple threads (like the event log watcher) interact safely with each other\n self._db_lock = threading.Lock()\n\n if not os.path.exists(self.path_for_shard(INDEX_SHARD_NAME)):\n conn_string = self.conn_string_for_shard(INDEX_SHARD_NAME)\n engine = create_engine(conn_string, poolclass=NullPool)\n self._initdb(engine)\n self.reindex_events()\n self.reindex_assets()\n\n super().__init__()", "def __init__(self, resources={}):\n super().__init__()\n self._lock = threading.Lock()\n self._resources = {}\n for key, resource in resources.items():\n self.register(key, resource)", "def __init__(self):\n self.redis = RedisClient()\n self.crawlers = [crawler_cls() for crawler_cls in crawlers_cls]", "def register(cls, L):\r\n ...", "def register_instance(self, instance):\n self.instance = instance", "def setup(self):\n # Instrument names\n instruments = list(self.features_df[\"instrument\"].unique())\n\n # Get Muxes for each instrument.\n inst_muxes = [self._instrument_mux(i) for i in instruments]\n\n # 
Construct the streams for each mux.\n mux_streams = [pescador.Streamer(x) for x in inst_muxes\n if x is not None]\n\n # Construct the master mux\n master_mux = pescador.mux(mux_streams, **self.master_mux_params)\n # We have to wrap the mux in a stream so that the buffer\n # knows what to do with it.\n self.master_stream = pescador.Streamer(master_mux)\n\n # Now construct the final streamer\n if self.use_zmq:\n self.buffered_streamer = zmq_buffered_stream(\n self.master_stream, self.batch_size)\n else:\n self.buffered_streamer = buffer_stream(\n self.master_stream, self.batch_size)", "def registration_started(self):\n pass", "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def _register(service, notifier=None):\n\n full_name = service.iden\n slot = service_store[full_name]\n try:\n slot['msg'] = 'Async image creation started'\n slot['stage'] = 2\n service_store[full_name] = slot\n\n service.make_image()\n\n slot['msg'] = 'Image for service created'\n slot['stage'] = 3\n service_store[full_name] = slot\n\n service.start_workers()\n\n slot['msg'] = 'Workers started'\n slot['stage'] = 4\n service_store[full_name] = slot\n\n service.check_health()\n\n slot['msg'] = 'Service ready'\n slot['stage'] = 5\n slot['slot'] = 'ready'\n slot['service'] = service\n service_store[full_name] = slot\n\n result = ok\n data = service\n except Exception as exc:\n slot['msg'] = 'Error: {}'.format(exc)\n slot['slot'] = 'error'\n service_store[full_name] = slot\n\n result = error\n data = str(exc)\n\n if service.notify and notifier is not None:\n notifier(service.notify, result, data)", "def __init__(self):\n self.uris = AsyncSet('uris')\n self.sockets = AsyncSet('sockets')", "def register_artifacts(self, which_pass):\n\n artifact_manager.register_temp_file(\n config.MIRROR_NODES_INDEX_TABLE, which_pass\n )\n artifact_manager.register_temp_file(\n config.MIRROR_NODES_STORE, which_pass\n )", "def setup(self):\n self.config = pau.IConfig\n self.session = pau.ISession\n pau.resolve(self)\n\n self.session.assets = Assets()\n self.config.db = self.db_name\n\n self.db = pau.IDb\n pau.resolve(self)\n\n # Instance\n i = Setup()\n pau.resolve(i)\n return i", "def __init__(self):\n self.__instance = None\n self.__configured_visitors = {}", "def register_instance(self, obj):\n self.__instances.append(obj)\n self._proxy_class_methods(obj)", "def _register(self, comm, handler):", "def insertInstrument(self, instrument):\n self.instruments.append(instrument)\n if instrument.bench and instrument.bench not in self.benches:\n logger.warning(\"Insterting *new* bench %s\", instrument.bench.name)\n self.benches.append(instrument.bench)\n if instrument.host and instrument.host not in self.hosts:\n logger.warning(\"Inserting *new* host %s\", instrument.host.name)\n self.hosts.append(instrument.host)", "def __init__(self, instrument):\n self._unsub_dispatcher = None\n self._instrument = instrument\n self._state = instrument.state", "def register(self, events=[]):\n self.events = events\n if not self in manager.handler:\n manager.handler.append(self)", "def register_instances(self, instances):\r\n if isinstance(instances, str) or isinstance(instances, unicode):\r\n instances = [instances]\r\n new_instances = self.connection.register_instances(self.name, instances)\r\n self.instances = new_instances", "def __init__(self):\n description = \"Manage Yarely sensors\"\n\n # The parent constructor provides config and logging and gets a\n # starting set of handlers using this classes _init_handlers() 
method.\n super().__init__(ZMQ_SENSORMANAGER_REP_PORT, description)\n self.registered = False\n\n # Setup for ZMQ Scheduler Messaging\n sensor_term_id = \"sensormanager_term_{id}\"\n self.zmq_sensormanager_term_identifier = sensor_term_id.format(\n id=id(self)\n )\n self.zmq_scheduler_req_addr = ZMQ_ADDRESS_LOCALHOST.format(\n port=ZMQ_SENSORMANAGER_REQ_PORT\n )\n self.zmq_scheduler_request_queue = queue.Queue() # Q of infinite size", "def load_epicsLive(self, instrument=None, **kwargs):\n# if 'instrument' in kwargs:\n# instrument = kwargs.get('instrument')\n# self.instrument = instrument\n if not instrument:\n instrument = self.instrument\n\n self.ioc = psioc.IOC(instrument=self.instrument)\n if self.instrument in ['cxi', 'mfx', 'xcs', 'mec']:\n self.ioc.load_cfg('xrt')\n \n print 'Loading EpicsSets for', instrument\n self.epicsLive = lcls_devices.EpicsSets(instrument=instrument, **kwargs)\n self.update_epicsLive()", "def __init__(self):\n self._pio = pio\n self._RUN_PATH = system_files.GEOPM_SERVICE_RUN_PATH\n self._SAVE_DIR = 'SAVE_FILES'\n self._WATCH_INTERVAL_SEC = 1\n self._active_sessions = system_files.ActiveSessions()\n self._access_lists = system_files.AccessLists()\n for client_pid in self._active_sessions.get_clients():\n is_active = self.check_client(client_pid)\n if is_active:\n watch_id = self._watch_client(client_pid)\n self._active_sessions.set_watch_id(client_pid, watch_id)\n with system_files.WriteLock(self._RUN_PATH) as lock:\n write_pid = lock.try_lock()\n if write_pid is not None and not self._active_sessions.is_client_active(write_pid):\n self._close_session_write(lock, write_pid)", "def register(self):\n mDNSServices = {}\n for srv in ServiceType:\n mDNSServices.update({srv.name: {}})\n self.service = srv.name\n self.regtype, self.port = ServiceType[self.service].value\n if not self._is_running():\n continue\n\n txtrecord, interfaceIndex = self._generate_txtRecord()\n if txtrecord is None:\n continue\n\n port = self._get_port()\n\n self.logger.trace(\n 'Registering mDNS service host: %s, regtype: %s, port: %s, interface: %s, TXTRecord: %s',\n self.hostname, self.regtype, port, interfaceIndex, txtrecord\n )\n for i in interfaceIndex:\n mDNSServices[srv.name].update({i: {\n 'sdRef': None,\n 'interfaceIndex': i,\n 'regtype': self.regtype,\n 'port': port,\n 'txtrecord': txtrecord,\n 'name': self.hostname\n }})\n\n mDNSServices[srv.name][i]['sdRef'] = pybonjour.DNSServiceRegister(\n name=self.hostname,\n regtype=self.regtype,\n interfaceIndex=i,\n port=port,\n txtRecord=txtrecord,\n callBack=None\n )\n\n self.finished.wait()\n for srv in mDNSServices.keys():\n for i in mDNSServices[srv].keys():\n self.logger.trace('Unregistering %s %s.',\n mDNSServices[srv][i]['name'], mDNSServices[srv][i]['regtype'])\n mDNSServices[srv][i]['sdRef'].close()", "def __init__(self):\n thisType = type(self)\n if not thisType._initialized:\n thisType._initialized = True\n self._embedded_device_registry = {}\n self._root_device_registry = {}\n self._service_registry = {}\n self._scan_for_device_extensions_under_code_container(dynamic_extensions)\n self._scan_for_device_extensions_under_code_container(standard_extensions)\n self._scan_for_service_extensions_under_code_container(dynamic_extensions)\n self._scan_for_service_extensions_under_code_container(standard_extensions)\n return", "def registered(self):\n log.info(\"Registered.\")\n pass", "def register_captured_url(url_list, url): \n if gs.local:\n register_captured_url_local(url_list, url)\n else:\n register_captured_url_aws(url_list, 
url)", "def _generate_trading_instances(self, start_date, end_date, instruments, params):\n configuration = self.configuration\n configuration.start_date = start_date\n configuration.end_date = end_date\n configuration.instruments = instruments\n\n logger.info(\"Creating DataHandler, Strategy, Portfolio and ExecutionHandler\")\n logger.info(\"Start date: %s\" % start_date)\n logger.info(\"End date: %s\" % end_date)\n logger.info(\"Instrument(s): %s...\" % instruments)\n logger.info(\"Params: %s...\" % params)\n\n self.data_handler = self.data_handler_cls(self.events, configuration)\n self.strategy = self.strategy_cls(self.data_handler, self.events, configuration, **params)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, configuration)\n self.execution_handler = self.execution_handler_cls(self.data_handler, self.events, configuration)", "def __init__(self, datadir, wsgi_mountpoint='/swab'):\n self.datadir = datadir\n\n #: Mapping of {<experiment name>: <Experiment object>}\n self.experiments = {}\n\n #: Mapping of {<goal name>: [<Experiment object>, ...]}\n self.experiments_by_goal = {}\n\n self.wsgi_mountpoint = wsgi_mountpoint\n makedir(self.datadir)", "def __init__(self):\n\n self.db = ImageDB()\n self.vitess = VitessConn()\n self.minio = MinioConn()", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.start_agents_once = False\n self.start_servers_once = False\n self.setup_start_agents = False\n self.setup_start_servers = False", "def __init__(self, name, daemon, nameserver,vectorClock):\n self._type = constants.ProcessConstants.TYPE_GATEWAY\n self._name = name\n self._ID = 1\n\n self._IDtoTypeMap = {}\n self._databaseName = None\n self._counter = 1\n self.eligible = True\n self.timeServer = False\n self.timeStamp = time()\n self.vectorClock = vectorClock\n\n self._registerOnServer(daemon, nameserver,self.vectorClock)", "def __init__(self):\n self.policy_store = PolicyStore()\n self.service_store = {}\n\n from ranger_performance_tool import perf_globals\n enabled_services = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"enabled_services\")\n service_type_mapping = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"service_type_mapping\")\n for service_name in enabled_services:\n if service_name not in service_type_mapping:\n raise Exception(f\"Unknown service name:{service_name}. 
\"\n f\"Add it to service_type_mapping in secondary config file\")\n service_type = service_type_mapping[service_name]\n random_type = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"services\",\n service_type, \"random_type\")\n self.service_store[service_type] = RangerAPIObjectStore.service_store_def[service_type](random_type=random_type)", "def __init__(self):\n # BASE_DIR:///artifice/scraper/\n self.BASE_DIR = os.path.dirname(loc)\n\n # prototypes\n self._eth0 = '0.0.0.0'\n self._exposed_port = 8080\n self._db_name = 'site.db'\n self._redis_pword = 'password'\n self._redis_host = 'localhost'\n self._redis_port = 6379\n self._celery_broker_uname = 'michael'\n self._celery_broker_pword = 'michael123'\n self._celery_broker_host = 'localhost'\n self._celery_broker_virtual_host = 'michael_vhost'\n\n # flask\n self.TESTING = False\n self.URL_PREFIX = ''\n self.FLASK_PORT = self._exposed_port\n self.FLASK_HOST = '0.0.0.0'\n self.FLASK_DEBUG = False\n self.FLASK_USE_RELOADER = False\n self.FLASK_THREADED = True\n\n # logging\n self.LOG_FILE = 'flask.log'\n self.LOG_LEVEL = 'INFO'\n self.CELERY_LOG_LEVEL = 'ERROR'\n self.CELERY_LOG_FILE = 'celery.log'\n self.STDOUT = True\n\n # database\n self.DROP_TABLES = True\n self.SQLALCHEMY_TRACK_MODIFICATIONS = False\n self.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(\n os.path.join(self.BASE_DIR, self._db_name))\n\n # redis\n self.REDIS_URL = 'redis://{}:@{}:{}/0'.format(\n self._redis_pword,\n self._redis_host,\n self._redis_port)\n self.REDIS_HIT_COUNTER = 'HIT_COUNTER'\n\n # defaults\n self.ARGS_DEFAULT_LIMIT = 10\n self.ARGS_DEFAULT_STATUS = ['READY', 'TASKED', 'DONE']\n\n self.SUPERVISOR_ENABLED = True\n self.SUPERVISOR_DEBUG = False\n self.SUPERVISOR_POLITE = 1\n\n # celery\n self.CELERY_WORKERS = 8\n self.CELERY_MODULE = 'background'\n self.CELERY_BROKER = 'amqp://{}:{}@{}/{}'.format(\n self._celery_broker_uname,\n self._celery_broker_pword,\n self._celery_broker_host,\n self._celery_broker_virtual_host)\n self.CELERY_BACKEND = 'rpc://'\n self.CELERY_INCLUDE = ['artifice.scraper.background.tasks']\n\n # endpoints\n self.URL_FOR_STATUS = 'http://{}:{}/status'.format(self._eth0, self._exposed_port)\n self.URL_FOR_QUEUE = 'http://{}:{}/queue'.format(self._eth0, self._exposed_port)\n self.URL_FOR_CONTENT = 'http://{}:{}/content'.format(self._eth0, self._exposed_port)", "def __init__(self):\n\t\tself.instances = {}\n\t\twith open(os.path.join(os.path.dirname(__file__), 'conf', 'parliaments.json'), 'r') as f:\n\t\t\tparliaments = json.load(f)\n\t\tfor c, cp in parliaments.items():\n\t\t\tfor p in cp:\n\t\t\t\tpfx = c + '/' + p['code']\n\t\t\t\tself.instances[pfx] = create_app(c, p)", "def register_resources(self):\n raise NotImplementedError", "def account_instruments(self, instruments: Instruments = sentinel):\n pass", "def register(self):\n if self.registered:\n return\n\n config = current_app.config.get('TERMINAL_CONFIGS', {})\n apps = config.get('apps', [])\n\n for app in apps:\n cls, mod = app.rsplit('.', maxsplit=1)\n imported = import_module(cls)\n instance = getattr(imported, mod)()\n\n if getattr(instance, 'name', None) is None:\n continue\n\n if getattr(instance, 'hidden', False):\n self.hidden[getattr(instance, 'name')] = instance\n else:\n self.apps[getattr(instance, 'name')] = instance\n\n self.__set_apps_aliases(getattr(instance, 'name'), getattr(instance, 'aliases'))\n\n self.registered = True", "def __init__(self, epics_only=False, *args, **kwargs):\n self._kwargs = {}\n self._detectors = {}\n self._det_list = [] \n 
self._det_aliases = {}\n self._psplots = {}\n self._event_functions = {}\n self._source_attrs = []\n self._evt_time_last = (0,0)\n self.ievent = 0\n self._reloadOnLoadRun = False\n self._reloadOnNextEvent = False\n self.psana_cfg_dict = {}\n self._default_module_path = ''\n\n# self._user_attrs = {}\n# self._histograms = {}\n \n for key in kwargs:\n self._kwargs[key] = kwargs[key] \n if key in self._exp_defaults:\n setattr(self,key,kwargs[key])\n print 'setting ',key, kwargs[key]\n\n self._device_config = read_device_config(**kwargs)\n self._device_sets = self._device_config['device_sets'] \n self._device_types = self._device_config['device_types'] \n\n for det in self._device_sets:\n if 'det' in self._device_sets[det]:\n if ('detName' in self._device_sets[det]['det'] or\n 'typeName' in self._device_sets[det]['det']):\n self._det_list.append(det)\n if 'det_key' in self._device_sets[det]['det']:\n det_key = self._device_sets[det]['det']['det_key']\n self._det_aliases[det_key] = det \n else:\n pass\n \n# if 'pvs' in self._device_sets[det]:\n# for attr in self._device_sets[det]['pvs']:\n# pvbase = self._device_sets[det]['pvs'][attr]['base']\n# alias = '_'.join([det,attr])\n# self.add_pv(pvbase, alias)\n\n self.set_exp_defaults(**kwargs)\n if not self._kwargs.get('noload'):\n self.data_source = self.get_data_source(**kwargs)\n print 'Data Source = ', self.data_source\n else:\n self.data_source = None\n\n if not self.data_source:\n self._kwargs['noload'] = True\n else:\n kwargs['run'] = self.run\n\n# if self._kwargs.get('noload') or self.live:\n# if self._kwargs.get('epics_live'):\n# self.set_kwargs(ami=True)\n \n if self._kwargs.get('ami'):\n print 'loading ami'\n self.load_ami(**kwargs)\n\n if not self._kwargs.get('noload'):\n print 'loading run'\n self.load_run(*args, **kwargs)\n self._no_epicsStore = False\n \n print 'Instrument = ', self.instrument\n\n if self._kwargs.get('epics_live'): # and self._kwargs.get('epics_file'):\n print 'loading epics'\n self.load_epicsLive(**kwargs)\n\n if self.ds and self.live:\n self.next_event()\n \n if self.ds and self._reloadOnNextEvent:\n self.next_event()\n \n if not self.ds:\n self._no_epicsStore = True\n self._no_evtData = True\n for det in self._device_sets:\n if 'pvs' in self._device_sets[det]:\n print 'Adding epics ',det\n self.add_detector(det)", "def instrument(self, instrument):\n\n self._instrument = instrument", "def instrument(self, instrument):\n\n self._instrument = instrument", "def instrument(self, instrument):\n\n self._instrument = instrument", "def instrument(self, instrument):\n\n self._instrument = instrument", "def connect_instrument(self):\n for instrument in self.rm.list_resources():\n try:\n k2400 = self.init_inst(instrument)\n k2400.timeout = 5000\n if k2400.query('*IDN?')[:8] == 'KEITHLEY':\n return k2400\n except AttributeError as f:\n logger.warning(f'Unknown error - {f}')\n except errors.VisaIOError as e:\n logger.warning(f'Not possible to connect the port - {k2400}.')", "def save_inst(self):\n self.sanity_check()\n self.data_loaded_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n 
self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n\n pickle.dump(self.pub_auth_all, open(fname_pub_auth_all, 'wb'))\n pickle.dump(self.pub_auth_top, open(fname_pub_auth_top, 'wb'))\n pickle.dump(self.pub_inst_all, open(fname_pub_inst_all, 'wb'))\n pickle.dump(self.pub_inst_top, open(fname_pub_inst_top, 'wb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.history, open(fname_pub_history, 'wb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.staff, open(fname_pub_staff, 'wb'))", "def __init__(self):\n self.backends = {}\n self.timeout = 5.0\n self._credentials = {}\n self._s3_folder = []", "def __init__(self, **kwargs):\n super(Service, self).__init__(**kwargs)\n\n self.whitelist.append(urlparse.urlparse(self.url).hostname)\n self.whitelist.extend(get_nameservers())\n self.whitelist.append('172.17.42.1')\n self.whitelist = list(set(self.whitelist))\n self.validate_whitelist()\n\n self.main_module_path = self.find_main_module()\n self.language = self.detect_language(self.main_module_path)\n self.state = None\n self.workers = []", "def __init__(self, reactor, resolver, pools, scansets, env):\n self.reactor = reactor\n self.resolver = resolver\n self.env = ScanEnvironment(reactor, resolver)\n # XXX quick hack, refactor later.\n for k, v in env.items():\n setattr(self.env, k, v)\n self.scans = {}\n self.pools = dict((name, defer.DeferredSemaphore(n))\n for name, n in pools.items())\n self.dnspool = self.pools.get(\n 'dns', defer.DeferredSemaphore(default_dns_pool_size))\n self.scansets = scansets", "def __init__(self, ignores):\n self._server = CoapServer(port=5683)\n self._server.registerForResourceGet(self._getResource)\n self._ignores = ignores", "def remote_registerEngine(self, engineReference):", "def __init__(self):\n\n self.storage: list = Storage()\n\n # Start for get data in API and set in storage\n self._set_proxies_in_storage()", "def autodiscover():\n \n _autodiscover(registry)", "def boot():\n\t\tcreate_project_url_dir(Spider.project_name)\n\t\tcreate_url_data(Spider.project_name, Spider.base_url)\n\t\tSpider.queue = file_to_set(Spider.queue_file)\n\t\tSpider.crawled = file_to_set(Spider.crawled_file)", "def __init__(self):\n self.addon_config = None\n self.network_key = None\n self.usb_path = None\n self.use_addon = False\n # If we install the add-on we should uninstall it on entry remove.\n self.integration_created_addon = False\n self.install_task = None", "def _attach_endpoints(self):\n for name, endpoint in inspect.getmembers(self):\n is_class = inspect.isclass(endpoint)\n is_subclass = is_class and issubclass(endpoint, self.Endpoint)\n not_endpoint = endpoint is not self.Endpoint\n\n if is_subclass and not_endpoint:\n endpoint_instance = endpoint(self.session)\n setattr(self, name.lower(), endpoint_instance)", "def __init__(self, ip, username, password, rm=SSHResourceManager()):\n self.ip=ip\n self.username=username\n self.password=password\n self.instrument=rm.open_resource(ip, username, password)\n logger.info(\"Connected to instrument at %s@%s\" % (username,ip))\n #self.instrument.timeout=2000", "def registerWithSitemap(self):\n\n self.core.requireUniqueService('registerWithSitemap')\n\n #from soc.modules.seeder.views import seeder\n #self.core.registerSitemapEntry(seeder.view.getDjangoURLPatterns())", "def regen(self):\n self.create(overwrite=True)\n self.load()", "def __init__(self, config):\n 
self._host = config['host']\n self._username = config['username']\n self._password = config['password']\n self._vc_name = config['Name']\n self._ingest_token = config['IngestToken']\n self._ingest_endpoint = config['IngestEndpoint']\n self._ingest_timeout = config['IngestTimeout']\n self._logger = logging.getLogger(self.get_instance_id())\n self._si = None\n self._connect()\n if self._si is None:\n raise ValueError(\"Unable to connect to host\")\n self._ingest = self._create_signalfx_ingest()\n if self._ingest is None:\n raise ValueError(\"Unable to create ingest client\")\n self._additional_dims = config.get('dimensions', None)\n if 'MORSyncInterval' not in config:\n config['MORSyncInterval'] = constants.DEFAULT_MOR_SYNC_INTERVAL\n self._mor_sync_timeout = config.get('MORSyncTimeout', constants.DEFAULT_MOR_SYNC_TIMEOUT)\n self._metric_sync_timeout = config.get('MetricSyncTimeout', constants.DEFAULT_METRIC_SYNC_TIMEOUT)\n self._inventory_mgr = inventory.InventoryManager(self._si, config['MORSyncInterval'],\n config['Name'], self.get_instance_id())\n self._inventory_mgr.start()\n if 'MetricSyncInterval' not in config:\n config['MetricSyncInterval'] = constants.DEFAULT_METRIC_SYNC_INTERVAL\n self._metric_conf = self._get_metric_config(config)\n self._metric_mgr = metric_metadata.MetricManager(self._si, config['MetricSyncInterval'],\n self._metric_conf, config['Name'], self.get_instance_id())\n self._metric_mgr.start()\n self._wait_for_sync()", "def register_uris(self, uri_mock_list=None):\n assert not self.__register_uris_called\n self.__do_register_uris(uri_mock_list or [])\n self.__register_uris_called = True", "def register(self, gauge):\r\n raise NotImplementedError", "def _registerEvent(self):\n # new DyStockDataTicksGateway instance for each ticks hand to avoid mutex\n self._ticksGateways = [DyStockDataTicksGateway(self._eventEngine, self._info, i) for i in range(DyStockDataEventHandType.stockHistTicksHandNbr)]", "def start(self):\n if self._started:\n return\n\n self._register()\n self._started = True", "def register(url, interval=300):\n return Client.get_client().register(url, interval=interval)", "def __init__(self, *args, **kwargs):\n self.quiet_mode = False\n self.mitm_pid = None\n self.mitm_dns_active = list()\n\n # Allow to skip re-executing iptable rules\n self.is_configured = kwargs.pop(\"mitm_configured\", False)\n\n # allowing MITM to have access to all devices to intercept.\n self.dev_mgr = kwargs.get(\"mgr\")\n self.intercepts = kwargs.pop(\"intercepts\", {})\n\n self.log_name = self.dev_mgr.board.config.get_station() + \".mitm\"\n self._tr069_ips = None\n atexit.register(self.stop_capture)\n\n MITM.configure_profile(self)" ]
[ "0.61770165", "0.586058", "0.5787275", "0.57755834", "0.5748999", "0.5597987", "0.5547358", "0.5466158", "0.5462284", "0.5456908", "0.5437423", "0.54095894", "0.5405339", "0.5405339", "0.53873736", "0.53591174", "0.5346121", "0.5344868", "0.5339623", "0.53348446", "0.53146374", "0.5310043", "0.530068", "0.5300645", "0.52885294", "0.52769274", "0.5273985", "0.52710813", "0.52710813", "0.52694774", "0.5262329", "0.5252448", "0.52511936", "0.5212997", "0.5211245", "0.5194885", "0.5149796", "0.5149487", "0.5145623", "0.51313907", "0.51278913", "0.5118596", "0.5118169", "0.51019293", "0.5099111", "0.5098547", "0.5094128", "0.50934434", "0.5089512", "0.5081912", "0.50747514", "0.5064594", "0.5063369", "0.5053915", "0.5047243", "0.50405395", "0.5031404", "0.50002337", "0.49953872", "0.49938032", "0.49920118", "0.49885052", "0.49883637", "0.498569", "0.49805254", "0.49785084", "0.49767944", "0.49766037", "0.4969037", "0.49673104", "0.49661234", "0.49524963", "0.49469212", "0.4942267", "0.49320096", "0.49320096", "0.49320096", "0.49320096", "0.49225345", "0.4917687", "0.4914461", "0.49106902", "0.49104315", "0.49045303", "0.49040207", "0.49026793", "0.4900428", "0.48997182", "0.48963514", "0.4894293", "0.48923975", "0.48905978", "0.48889405", "0.48854515", "0.4881979", "0.4880646", "0.48777696", "0.4875128", "0.48706147", "0.48670086" ]
0.78891295
0
Save instruments and modes to configpath
def save(self) -> None:\n    logger.info("Saving to config...")\n    yml.save(self._config, self.configpath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def save(self, config_path):\n raise NotImplementedError()", "def save():\n\t\ttry:\n\t\t\t#paths\n\t\t\tConfig.conf.set('paths', 'source_dir', Config.source_dir)\n\t\t\tConfig.conf.set('paths', 'lyrics_dir', Config.lyrics_dir)\n\n\t\t\t#actions\n\t\t\tConfig.setBool('actions', 'save_to_file', Config.save_to_file)\n\t\t\tConfig.setBool('actions', 'save_to_tag', Config.save_to_tag)\n\n\t\t\t#sources\n\t\t\tConfig.setBool('sources', 'lyric_wikia', Config.lyric_wikia)\n\t\t\tConfig.setBool('sources', 'musix_match', Config.musix_match)\n\t\t\tConfig.setBool('sources', 'lyricsmode', Config.lyricsmode)\n\t\t\tConfig.setBool('sources', 'az_lyrics', Config.az_lyrics)\n\n\t\t\twith open(Config.config_path, 'w') as configfile:\n\t\t\t\tConfig.conf.write(configfile)\n\t\t\treturn True\n\n\t\t# Catch all config parser errors\n\t\texcept BaseConfigParserError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def save_switch_configs(self):", "def save_config():\n # Order the load flags using load_keys...\n od_load_flags = OrderedDict()\n for k in load_keys:\n od_load_flags[k] = load_flags[k]\n pawstools.save_cfg(od_load_flags,cfg_file)", "def saveConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()", "def save_config(self, *args, **kwargs):\n raise NotImplementedError", "def save_config(self):\n config.save_config(self.config, self.config_file)", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def save():\n\n env.config.save(env.config_file)", "def saveCurrentConfig():\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", SW_CONFIG['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", SW_CONFIG['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", SW_CONFIG['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", SW_CONFIG['sw_version'])\n cf.set(\"sw_config\", \"startup\", SW_CONFIG['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", RUN_CONFIG['pop'])\n cf.set(\"run_config\", \"backup\", RUN_CONFIG['backup'])\n cf.add_section(\"hook_config'\")\n for k, v in HOOK_CONFIG:\n cf.set(\"hook_config\", k, v)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()", "def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = self.config[\"Settings\"]\r\n settings[\"datapath\"] = self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)", "def saveConfig(config):\n global SW_CONFIG\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", config['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", config['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", config['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", config['sw_version'])\n cf.set(\"sw_config\", \"startup\", config['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", 
False)\n cf.set(\"run_config\", \"backup\", False)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()\n SW_CONFIG = config", "def save_to_conf(self):\r\n raise NotImplementedError", "def save_to_conf(self):\n raise NotImplementedError", "def save_config(self):\n if not os.path.exists(self._conf_dir):\n os.makedirs(self._conf_dir)\n conf_file = os.path.join(self._conf_dir, \"dql.json\")\n with open(conf_file, \"w\") as ofile:\n json.dump(self.conf, ofile, indent=2)", "def save_configuration(config):\n with open(cwd + '/configuration.pickle', 'wb') as handle:\n pickle.dump(config, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def saveNewConfiguration(self):\n selection = tk.filedialog. \\\n asksaveasfilename(title=\"Save CHUM configuration\")\n if selection:\n self._currentConfiguration = selection\n self._saveToFilePath(selection)", "def save(self):\n # Always write out components in alphabetical order for determinism,\n # especially in tests.\n for function_name in sorted(self._components.keys()):\n self._config_parser[_COMPONENTS_SECTION][\n function_name] = self._components[function_name]\n\n with open(str(self._config_filepath), 'w') as f:\n self._config_parser.write(f)", "def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)", "def save_config(self):\n\n if not self.__conf.has_section(self.section):\n self.__conf.add_section(self.section)\n\n for key in self._params:\n val = self._params[key]\n self.__conf.set(self.section, key, val)\n\n with open(self.conf_path, 'w') as f:\n self.__conf.write(f)", "def save(self, savedir='.', fname=None):\n # Build up the file path to write to.\n dirpath = os.path.abspath(savedir)\n if fname is None:\n fname = os.path.basename(self.fname)\n\n name = os.path.splitext(fname)[0]\n path = os.path.join(dirpath, name + '.conf')\n\n # Put all comments and attributes into string formats.\n lines = ['# %s' % comment for comment in self.comments]\n lines.append('')\n for letter, section in self.config_guide.items():\n names = getattr(self, section)\n if names:\n if isinstance(names, basestring):\n line = '%s: %s;' % (letter, names)\n else:\n line = '%s: %s;' % (letter, ', '.join(names))\n lines.append(line)\n\n # Write the config.\n with open(path, 'w') as f:\n f.write('\\n'.join(lines))", "def saveConfig():\n with open(_CONFIG_FNM, 'w') as configfile:\n CONFIG_DICT.write(configfile,\n space_around_delimiters=True)", "def save_options(self,config,options_file):\n \n config.set('manager-editable','media_offset',self.media_offset)\n config.set('manager-editable','profiles_offset',self.pp_profiles_offset)\n config.set('manager-editable','use_sudo',self.use_sudo)\n config.set('manager-editable','options',self.options)\n\n config.set('manager-editable','autostart_path',self.autostart_path) \n config.set('manager-editable','autostart_use_sudo',self.autostart_use_sudo)\n config.set('manager-editable','autostart_options',self.autostart_options)\n \n with open(options_file, 'wb') as config_file:\n config.write(config_file)", "def save_config(**kwargs):\n if kwargs == {}:\n kwargs = config._config\n current_config = _load_config()\n current_config.update(**kwargs)\n # write to disk\n fname = _get_config_fname()\n if fname is None:\n raise RuntimeError('config filename could not be determined')\n if not op.isdir(op.dirname(fname)):\n os.mkdir(op.dirname(fname))\n with open(fname, 'w') as fid:\n json.dump(current_config, fid, sort_keys=True, indent=0)", "def save():\n print(\"Saving config file..\")\n\n res = 
yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)", "def save_config_file(self):\n wkdir = Path(self.config_dict[\"outputdir\"])\n config_filename = str(wkdir / f\"{self.config_dict['name']}.json\")\n save_config(self.config_dict, config_filename)", "def saveCfgFile(self):\n mash_ini = False\n if GPath('mash.ini').exists():\n mashIni = ConfigParser.ConfigParser()\n mashIni.read('mash.ini')\n mash_ini = True\n instPath = GPath(mashIni.get('General','sInstallersDir').strip()).s\n else:\n instPath = \"\"\n if instPath != dirs[\"installers\"].s:\n if not mash_ini:\n if os.path.exists(os.path.join(os.getcwd(), \"mash_default.ini\")):\n f = open(os.path.join(os.getcwd(), \"mash_default.ini\"), \"r\")\n d = f.read()\n f.close()\n else:\n d = \"[General]\\n\"\n f = open(os.path.join(os.getcwd(), \"mash.ini\"), \"w\")\n f.write(d)\n f.close()\n mashIni = ConfigParser.ConfigParser()\n mashIni.read('mash.ini')\n mashIni.set(\"General\",\"sInstallersDir\",os.path.abspath(dirs[\"installers\"].s))\n f = open(os.path.join(os.getcwd(), \"mash.ini\"),\"w\")\n mashIni.write(f)\n f.close()", "def save(self):\n\t\tself.CONFIG.save()\n\t\tself.temp_files.save()", "def save(self):\n self.__config.sync()\n self.__saved = True\n Logger().debug(\"Configuration saved\")", "def save_config(self, path):\n if os.path.isdir(path):\n path = os.path.join(path, 'config.json')\n print('Save config to {}'.format(path))\n with open(path, 'w', encoding='utf-8') as w:\n w.write(json.dumps(self.to_dict(), indent=2,\n sort_keys=True))", "def saveExitConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()\n self.close()", "def save( self ):\n ini = codecs.open(self.filename,\"w\",\"utf-8\",errors=\"replace\",buffering=0)\n for (name,value) in self.conf.items():\n print >>ini, name, \"=\", value\n ini.close()", "def save(data, section): # save a config\n\tglobal _timesSaved\n\tif dynConfig['logConfigActions']:\n\t\tlogger.info( f'saving {section}: {data}' )\n\t# save\n\tif section != 'placeholderForSaving':\n\t\tcurrentConfigData[section] = data\n\t\tlogger.debug( f'saved {section}' )\n\telse:\n\t\t_timesSaved = 2\n\t# save to disk if this is the third save\n\tif _timesSaved == 0 or _timesSaved == 1:\n\t\t_timesSaved += 1\n\telse:\n\t\t_timesSaved = 0\n\t\ttry:\n\t\t\t# save to disk\n\t\t\twith open( configPath, 'w', encoding='utf-8' ) as file:\n\t\t\t\tjson.dump( currentConfigData, file, indent=4 )\n\t\texcept:\n\t\t\tlogger.error( f'failed to save config to disk!' 
)\n\t\t\traise ConfigError( 'error while saving the config file' )", "def save_configurations(self):\n # Get the file path\n self.data_path = self.data_path_entry.get()\n # Open the file\n with open(self.data_path, 'rb') as file:\n self.log('Opened ' + str(self.data_path))\n # Un-serialize\n info = pickle.load(file)\n # Write the new properties\n self.main_window.overwrite_properties(info)\n\n self.exit()", "def write_config_file(self):\n for opt, opt_desc in self.opt_dict.iteritems():\n if 'permanent' in opt_desc and opt_desc['permanent'] == True:\n enabled = 'Always'\n else:\n enabled = opt_desc['enabled'].__str__()\n\n self.file_parser.set(opt, 'enabled', enabled)\n self.file_parser.set(opt, 'implementation',\n opt_desc['selected_imp'])\n self.file_parser.set(opt, 'optype', opt_desc['imptype'])\n\n for config, config_desc in self.config_dict.iteritems():\n enabled = config_desc['enabled'].__str__()\n self.file_parser.set(config, 'enabled', enabled)\n\n scratch_file = self.config_filename + '.scratch'\n with open(scratch_file, 'w') as cfile:\n for config in sorted(self.config_dict.keys()):\n self.write_section(cfile, config)\n\n for opt in sorted(self.opt_dict.keys()):\n self.write_section(cfile, opt)\n\n for imp in sorted(self.imp2opt_dict.keys()):\n self.write_section(cfile, imp)\n\n cfile.write(\"\\n\")\n\n os.rename(scratch_file, self.config_filename)", "def save_cfg(self, output_dir):\n output_path = os.path.join(output_dir, 'level_config.cfg')\n shutil.copy(self.cfg_path, output_path)", "def save_config_file(self):\n with open(self.config_file_name, 'w',encoding='utf-8') as outfile:\n json.dump(self._config, outfile,indent=2)", "def save_conf(self, name=None):\n \n if name:\n filename = name\n \n else:\n filename = \"conf_\" + str(self.conf[\"device\"]) + \"_\" + datetime.today().strftime('%Y-%m-%d') + \".txt\"\n \n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename, \"w\") as file:\n json.dump(self.conf, file)", "def save_config(self, path=\"\"):\n if not path:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n file_path = QtGui.QFileDialog.getSaveFileName(self,\n \"Save Config\",\n CONFIG_DIR,\n \"Config File (*.cfg)\")\n else:\n file_path = path\n self._save_state(file_path)\n self.write_text(\"Saved config @ {}\".format(file_path))", "def update(self):\n self.save_config_file()", "def save(self):\n Registry.SetKey(self.CONFIG_NAME, self.config, True)\n self.load() # for validation", "def configuration(config):\n create_str_dir(config)\n add_skymap(config)\n save_configuration(config)", "def _load(self) -> list[Instrument]:\n logger.info(\"Loading config...\")\n self._config = yml.load(self.configpath)\n instruments, modespec = self._config[\"instruments\"], self._config[\"modes\"]\n logger.success(f\"Found {len(instruments)} instruments, {len(modespec)} modes\")", "def _save_state(self, config_path=\"\"):\n state = {\n 'output_dir': str(self.ui.lineEdit_output_path.text()),\n 'device': str(self.ui.comboBox_device.currentText()),\n 'event_bits': str(self.ui.lineEdit_data_bits.text()),\n 'freq': str(self.ui.lineEdit_pulse_freq.text()),\n 'labels': self.digital_labels,\n 'timestamp': self.ui.checkBox_timestamp.isChecked(),\n 'comm_interface': self.ui.checkBox_comm.isChecked(),\n 'analog_on': self.ui.checkBox_analog_channels.isChecked(),\n 'analog_channels': eval(str(self.ui.lineEdit_analog_channels.text())),\n 'analog_sample_rate': self.ui.comboBox_analog_freq.currentIndex(),\n 'analog_dtype': 
self.ui.comboBox_dtype.currentIndex(),\n 'analog_labels': self.analog_labels,\n 'counter_input_terminal': str(self.ui.comboBox_ci.currentText()),\n 'counter_input_on': self.ui.checkBox_ci.isChecked(),\n }\n if not config_path:\n config_path = LAST_SESSION\n with open(config_path, 'wb') as f:\n pickle.dump(state, f)", "def save_config(config_path: str, data: dict):\n with open(config_path, 'w') as j:\n dump(data,j)", "def antenny_save(self, name: str = None):\n self.antenny_config.save(name)\n self.antenny_config.save_as_default_config()\n self.imu_save(name)\n self.imu_config.save_as_default_config()\n self.azimuth_servo_save(name)\n self.elevation_servo_save(name)\n self.servo_config.save_as_default_config()", "def saveConfig(config, filepath=None):\n result = False\n if filepath is None:\n filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"res/\", \"config.ini\")\n try:\n with open(filepath, 'wb') as configfile:\n config.write(configfile)\n result = True\n except Exception, e:\n print \"*** Caught Exception: %r ***\" % e\n return result", "def save_config(self):\n data = json.dumps(self.cfg)\n\n try:\n file = open(self.cfg_file_name, 'w')\n file.write(data)\n except OSError as err:\n print(\"can't save property: {0}\".format(err))\n else:\n file.close()", "def save_opts(self):\n # save code as another folder in log_path\n dst_path = os.path.join(self.log_path, 'code', 'v0')\n iter_yes_or_no = 0\n while os.path.exists(dst_path):\n dst_path = os.path.join(self.log_path, 'code', 'v' + str(iter_yes_or_no))\n iter_yes_or_no = iter_yes_or_no + 1\n user_name = expanduser(\"~\")\n try:\n shutil.copytree(os.getcwd(), dst_path, ignore=shutil.ignore_patterns('*.pyc', 'tmp*'))\n except Exception as e_copytree:\n print(e_copytree)\n\n models_dir = os.path.join(self.log_path, \"models\")\n if not os.path.exists(models_dir):\n os.makedirs(models_dir)\n to_save = self.opt.__dict__.copy()\n\n with open(os.path.join(models_dir, 'opt.json'), 'w', encoding='utf-8') as f:\n json.dump(to_save, f, indent=2)", "def save_settings():\n\n dont_save = ['VISIONEGG_CONFIG_FILE',\n 'VISIONEGG_SYSTEM_DIR',\n 'VISIONEGG_USER_DIR',\n ]\n\n if not VisionEgg.config.VISIONEGG_CONFIG_FILE:\n raise RuntimeError(\"No config file in use.\")\n re_setting_finder = re.compile(r\"^\\s?((?:VISIONEGG_[A-Z_]*)|(?:SYNCLYNC_[A-Z_]*))\\s?=\\s?(\\S*)\\s?$\",re.IGNORECASE)\n\n orig_file = open(VisionEgg.config.VISIONEGG_CONFIG_FILE,\"r\")\n orig_lines = orig_file.readlines()\n\n line_ending = orig_lines[0][-2:]\n if line_ending[0] not in ['\\r','\\n','\\l']:\n line_ending = line_ending[1]\n\n out_file_lines = []\n\n saved_config_vars = []\n\n for line in orig_lines:\n out_line = line # The output is the same as the input unless there's a match\n match = re_setting_finder.match(line)\n if match:\n name = match.group(1).upper()\n if name in VisionEgg.config.__dict__.keys():\n if name not in dont_save:\n # Change the output line\n out_line = (\"%s = %s\"%(name,getattr(VisionEgg.config,name,))) + line_ending\n saved_config_vars.append(name)\n out_file_lines.append(out_line)\n\n # Close and reopen orig_file in write mode\n orig_file.close()\n orig_file = open(VisionEgg.config.VISIONEGG_CONFIG_FILE,\"w\")\n for line in out_file_lines:\n orig_file.write(line)", "def save_config() -> None:\n with open(_config_file, \"w\", newline=\"\") as config_file:\n json.dump(_config, config_file, indent=4)\n config_file.truncate()", "def save_config(self):\n\n return self.perform_action('/mgmtd/db/save')", "def save_to_file(self):\n 
check_path(self.config_path)\n\n with open(self.settings_file, 'w') as settings_file:\n options = self._get_options()\n json.dump(options,\n \t settings_file,\n \t indent=4,\n \t separators=(',', ': '))", "def save_config(conf, save_path):\n with open(os.path.join(save_path), \"w\") as f:\n f.write(yaml.dump({'param': conf}, default_flow_style=False))", "def save_config(self):\n self.config.app_w = self.width()\n self.config.app_h = self.height()\n self.config.splitter = self.splitter.saveState()\n self.config.save()", "def save_config(config):\n with open(os.path.abspath(CONFIG_PATH), 'wb') as config_file:\n pickle.dump(config, config_file)\n return config", "def save_configuration(self):\n dom = self.vistrailsStartup.startup_dom()\n doc = dom.documentElement\n configuration_element = enter_named_element(doc, 'configuration')\n doc.removeChild(configuration_element)\n self.configuration.write_to_dom(dom, doc)\n self.vistrailsStartup.write_startup_dom(dom)\n dom.unlink()", "def save(self):\r\n with open(self.filename, 'w') as f:\r\n if self.pretty:\r\n json.dump(self.__config, f, sort_keys=False,\r\n indent=4, separators=(',', ': '))\r\n else:\r\n json.dump(self.__config, f)", "def save_config(conf, default):\n print()\n if yes_no('Would you like to save your configuration?'):\n name = simple_response(\n 'What would you like to name your configuration?')\n path = ask_path(\n 'Please enter the path you would like your configuration saved to',\n default=default)\n file_path = os.path.join(path, name)\n if file_path.find('.json') == -1:\n file_path += '.json'\n with open(file_path, 'w+') as f:\n json.dump(conf, f, indent=4)", "def SaveConfig(self):\n config_value = getattr(self, APPDATA)\n path_value = config_value.AbsolutePaths[0]\n default_cfg_file = os.path.join(path_value, CONFIG_FILE_NAME)\n temp_file = default_cfg_file + '.TEMP'\n if os.path.exists(default_cfg_file):\n json.dump(type(self)._CURRENT_CONFIG,\n open(temp_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)\n EnsureBackup(temp_file, default_cfg_file)\n else:\n if not os.path.isdir(path_value):\n os.mkdir(path_value)\n json.dump(type(self)._CURRENT_CONFIG,\n open(default_cfg_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)", "def save_conf(self):\r\n self.sendAndRecv(\"SAVECONF\\r\\n\")", "def onExportConfig(self, evt):\n dlg = wx.FileDialog(self.view, \"Save As Configuration File\", wildcard = \"*.ini\" ,\n style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)\n if dlg.ShowModal() == wx.ID_OK:\n fileName=dlg.GetPath()\n self.config.exportConfig(fileName=fileName, e=None)", "def _save_changes(self):\n copy2(self._cfg_filename, self._cfg_filename + \".bak\")\n with open(self._cfg_filename, \"w\", encoding=\"utf-8\") as self._cfg_file:\n self.write(self._cfg_file)", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)", "def save_config(self):\n try:\n print(\"Clearing active users\")\n for room in self.rooms:\n room.room_attrbts['active'].clear()\n print('Saving config...')\n print(\"Known clients:\")\n self.pp.pprint(self.clients)\n print(\"Known rooms:\")\n for room in self.rooms: \n self.pp.pprint(room.name)\n self.pp.pprint(room.room_attrbts)\n path = os.environ.get('HOME') + 
'/.tinyserver'\n roomJSON = jsonpickle.encode(self.rooms)\n with open(path, 'w') as f:\n json.dump(roomJSON, f)\n except Exception as e:\n print(\"Error saving config!! {0}\".format(e))", "def saveConfigs(self):\r\n ctrls = self.__ctrls\r\n cache = self.__cache\r\n # inputs\r\n # cache['input_file'] = ctrls['input_file'].GetTextCtrlValue()\r\n cache['input_file'] = ctrls['input_file'].GetTextCtrlValue()\r\n cache['prmtop_file'] = ctrls['prmtop_file'].GetTextCtrlValue()\r\n cache['ip_restart_file'] = \\\r\n ctrls['ip_restart_file'].GetTextCtrlValue()\r\n # outputs\r\n cache['log_file'] = ctrls['log_file'].GetValue()\r\n cache['op_restart_file'] = ctrls['op_restart_file'].GetValue()\r\n cache['crds_file'] = ctrls['crds_file'].GetValue()\r\n cache['vels_file'] = ctrls['vels_file'].GetValue()\r\n cache['enes_file'] = ctrls['enes_file'].GetValue()", "def save(self):\n try:\n with open(self._filename, 'w') as conf_file:\n conf_file.write(json.dumps(self._data))\n except OSError:\n _LOGGER.exception(\"Can't store config in %s\", self._filename)", "def save_specs(self, filename):\n pass", "def save_specs(self, filename):\n pass", "def save_config(log_dir, config):\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n shutil.copy(config, os.path.join(log_dir, 'config.gin'))", "def save(self):\n with open(self._config, 'w') as f:\n json.dump(self.data, f, indent=2, sort_keys=True)", "async def save_config(self):\n\n # Display info message\n log.info(\"save_config\")\n\n # Send command\n output = await self.send_command(self.cmd_save_config)\n\n # Return the commands of the configuration saving process\n return output", "def save(config, filename=None):\n filename = add_directory(filename or 'configure.json')\n directory = os.path.dirname(filename)\n if not os.path.exists(directory):\n os.makedirs(directory, 0o700)\n with open(filename, \"w\") as f:\n json.dump(config, f, indent=2, sort_keys=True)", "def storeConfig(self):\n config = self.app.config['mainW']\n for driver in self.drivers:\n config[driver] = self.drivers[driver]['uiDriver'].currentIndex()\n\n return True", "def save(self, filepath):\n save_ckpt = {\n 'ae': self.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n try:\n torch.save(save_ckpt, os.path.join(filepath, 'ckpt_ae.pth'))\n except:\n print('Cannot save autoencoder.')", "def save_config(_config, simulation_dir):\n with open(os.path.join(simulation_dir, 'config.yaml'), 'w') as f:\n yaml.dump(_config, f, default_flow_style=False)", "def save(self):\n with open(self._CONFIG_FILE_PATH, 'w') as config_file:\n json.dump(vars(self), config_file)\n return self._CONFIG_FILE_PATH", "def saveConfig(self, name=None):\n\n configDir = self.mwGlob['configDir']\n\n if self.config.get('profileName', '') == 'config':\n if 'reference' in self.config:\n del self.config['reference']\n\n # default saving for reference\n if name is None:\n name = self.config.get('reference', 'config')\n\n fileName = configDir + '/' + name + '.cfg'\n with open(fileName, 'w') as outfile:\n json.dump(self.config,\n outfile,\n sort_keys=True,\n indent=4)\n # if we save a reference first, we have to save the config as well\n if name != 'config':\n fileName = configDir + '/config.cfg'\n with open(fileName, 'w') as outfile:\n json.dump(self.config,\n outfile,\n sort_keys=True,\n indent=4)\n return True", "def save_preferences(self, path=None):\n if path is None:\n path = os.path.join(self.default_folder, self.default_file)\n\n prefs = ConfigObj(path)\n for plugin_id in self._pref_decls:\n plugin = 
self.workbench.get_plugin(plugin_id)\n decl = self._pref_decls[plugin_id]\n save_method = getattr(plugin, decl.saving_method)\n prefs[plugin_id] = save_method()\n\n prefs.write()", "def save_config(self):\n with open(self.config_file, 'w') as fout:\n json.dump({'name_dict': self._name_dict, 'metric_dict': self._metric_dict, 'credential_path': self.credential_path, 'path_for_worksheet_name': self.path_for_worksheet_name}, fout)", "async def save_config(self):\n\n # Display info message\n log.info(\"save_config\")\n\n # Send command to ask for saving config. Wait till the question to overwrite\n # the startup file (\"Overwrite file [startup-config].... (Y/N)[N] ?\")\n output = await self.send_command(self.cmd_save_config, pattern=\"?\")\n\n # Confirm to save the config\n output += await self.send_command(\"Y\")\n\n # Return the commands of the configuration saving process\n return output", "def save_to_config(self) -> None:\n config_path = os.path.join(self.base_path, \"config.json\")\n\n with open(config_path, \"r\") as _json:\n c_dict = json.load(_json)\n\n c_dict[\"mean_similarity_error\"] = self.ME\n c_dict[\"similarity_correlation\"] = self.pearson_corr\n c_dict[\"similarity_spearman_correlation\"] = self.spearman_corr\n\n with open(config_path, \"w\") as _json:\n json.dump(c_dict, _json)", "def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)", "def save(self, uri):\r\n pf = PyFolder(os.path.dirname(os.path.realpath(uri)), allow_override=True)\r\n pf[os.path.basename(uri)+\"_options.json\"] = {\r\n 'input_cells': self._input_cells,\r\n 'latent_space': self._latent_space,\r\n }\r\n\r\n save_model(self._autoencoder, uri+\"_lstm_autoencoder.hdf5\")\r\n save_model(self._encoder, uri+\"_lstm_encoder.hdf5\")", "def write_config_file():\n\tif not config_parser:\n\t\tprint \"Config module not loaded. 
I don't save anything.\"\n\t\treturn\n\n\tf = file(config_file, \"w\")\n\tconfig_parser.write(f)\n\tf.close()", "def __write_ana_config(self, path, macros):\n self.__write_fw_config(path=path, template_name=\"analog_settings.template\", macros=macros)", "def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())", "def save(self) -> None:\n self._client.save_config()", "def save_config(self, save_path: str) -> None:\n os.makedirs(save_path, exist_ok=True)\n model_hyperparameters_path = os.path.join(save_path, MODEL_HYPERPARAMETERS_FILE_NAME)\n save_json(model_hyperparameters_path, self.config_obj.to_dict())", "def save_config(config: Dict[str, Any], path: str) -> None:\n\n with open(path, 'w+', encoding='utf-8') as stream:\n yaml.dump(config, stream, default_flow_style=False)", "def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)", "def save_conf(conf: m.Image) -> None:\n # Determine the name of the config file.\n parts = conf.filename.split('.')[:-1]\n parts.append('json')\n conffile = '.'.join(parts)\n\n # Serialize the config.\n confmap = {\n 'Version': __version__,\n 'Image': conf.asdict(),\n }\n confjson = json.dumps(confmap, indent=4)\n\n # Save the config.\n with open(conffile, 'w') as fh:\n fh.write(confjson)", "def _save(self):\n file = open(\"settings.ini\", \"w\")\n self._parser.write(file)\n file.close()", "def write_config():\n\n e = Element(\"Configuration\")\n r = SubElement(e, \"RepositoryList\")\n r = SubElement(r, \"Repository\", name = \"default\")\n SubElement(r, \"Module\").text = args.driver\n SubElement(r, \"TokenLabel\").text = args.token_label\n SubElement(r, \"PIN\").text = args.pin\n ElementTree(e).write(args.write_config)\n args.write_config.flush()", "def save_iantconfig(self, filename_root):\n x, y, z = self.get_coords_enu()\n d = np.ones_like(x) # set to 1 to avoid shadowing in iantconfig\n coords = np.vstack([d, x, y, z]).T\n coords = np.vstack([coords, [0, self.lon_deg, self.lat_deg, 0]])\n np.savetxt('%s.enu.%ix%i.txt' % (filename_root, x.size, 4),\n coords, fmt=b'%-5i %.12f %.12f %.12f')", "def save_config(self, new_config, filename=None):\n self.cfg.update(new_config)\n if filename is None:\n self.cfg.filename = self.cfg_filename\n else:\n self.cfg.filename = filename\n self.cfg.write()\n logger.info(\"Config file %s written out\" % self.cfg.filename)", "def save_settings(path, server, station):\n db.save_data(path, server, station)" ]
[ "0.69961077", "0.6905757", "0.67979085", "0.6779115", "0.663721", "0.6624185", "0.65824383", "0.6569083", "0.6564401", "0.6522336", "0.6489692", "0.646409", "0.6439854", "0.6433161", "0.637511", "0.63696384", "0.635844", "0.63290244", "0.6287945", "0.62523466", "0.62460214", "0.6223467", "0.6212512", "0.62030524", "0.61702484", "0.61660534", "0.6145498", "0.61387694", "0.61150604", "0.6108188", "0.6106222", "0.60989314", "0.6056947", "0.6002333", "0.600038", "0.59870327", "0.5966106", "0.59608066", "0.5954657", "0.59528136", "0.59497964", "0.59484977", "0.59305274", "0.5918328", "0.5915777", "0.5913141", "0.5878504", "0.5855133", "0.58507246", "0.5833274", "0.5819746", "0.58178157", "0.58130544", "0.5793689", "0.57824713", "0.57771766", "0.57747614", "0.5773099", "0.5772615", "0.5755837", "0.57522714", "0.57424635", "0.573362", "0.57137305", "0.5710153", "0.57099193", "0.5704594", "0.5702872", "0.57003886", "0.5694628", "0.5689682", "0.5689682", "0.56720215", "0.56589407", "0.5648019", "0.5642906", "0.56279194", "0.5619065", "0.5607385", "0.5598558", "0.55919886", "0.5583519", "0.5577158", "0.5574596", "0.556888", "0.5557109", "0.5551564", "0.5551492", "0.55501753", "0.5549369", "0.55296916", "0.5527461", "0.5515508", "0.55141634", "0.55088764", "0.5507245", "0.5492734", "0.54877007", "0.5480388", "0.54614127" ]
0.5880172
46
Disconnect instruments and shutdown daemon
def shutdown(self) -> None:
    logger.info("Disconnecting instruments...")
    for instrument in self._config["instruments"]:
        instrument.disconnect()
    logger.info(f"Shutting down {self}...")
    self._daemon.shutdown()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown(self):\n if self.alive:\n libplasma.disconnect(self.conn)\n self.alive = False", "def shutdown(self):", "def stopAndDisconnectWalabot():\n wlbt.Stop()\n wlbt.Disconnect()\n print ('Termination successful')", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "def shutdown(self):\n ...", "def test_disconnect(self):\n self.inverter.disconnect()\n sleep(0.02)", "def initiate_shutdown(self) -> None:", "def device_disconnect(self):\n pass", "def close(self):\n logger.warning('Shutting down')\n self.display.off()\n self.mqtt.disconnect()", "def shutdown(self):\t\r\n\t\tself.is_running = False\r\n\t\tfor connection in self.established_connection_list:\r\n\t\t\tconnection.send('The server has been shutdown adruptly by the server owner.\\n')\r\n\t\t\tconnection.socket_send()", "def shutdown(self):\n try:\n self.driver.stop()\n except:\n logging.exception(\"Could not stop driver on shutdown\")\n\n self.arduino.stop()", "def disconnectAllServers():\n _disconnectAllServers()", "def disconnect(self):", "def Disconnect_from_ePCSim_Server(ePCSim_conn):\r\n ePCSim_conn.Disconnect()", "async def shutdown(self) -> int:", "async def shutdown(self):", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def _shutdown(self):", "def signal_handler(*args):\n if station:\n station.shutdown()", "def shutdown(self):\n\n raise NotImplementedError", "def disconnect(self):\n self.arduino.close()\n self.arduino = None", "def shutdown_all(self, now=False):", "def shutdown(self):\n\n pass", "def disconnect(self) -> None:\n ...", "def shutdown():\n\n cmd = dict()\n cmd[\"type_\"] = \"shutdown\"\n cmd[\"name_\"] = \"all\"\n\n ## In case of the shutdown there will be no returned message to\n ## check the success.\n s = comm.send_and_receive_socket(cmd)\n\n s.close()", "def _stop(self):\n\n if self._daemon_id:\n pyro_proxy_name = 'PySwitchLib.' 
+ self._daemon_id\n uri = None\n\n try:\n with Pyro4.locateNS(host='localhost', port=self._pyro_ns_port) as ns:\n try:\n uri = ns.lookup(pyro_proxy_name)\n except:\n pass\n\n if uri:\n ns.remove(pyro_proxy_name)\n except:\n pass\n finally:\n ns_daemon_dict = ConfigFileUtil().read(filename=pyswitchlib_ns_daemon_file)\n\n if self._daemon_id in ns_daemon_dict:\n uri = ns_daemon_dict[self._daemon_id]\n del ns_daemon_dict[self._daemon_id]\n\n if len(ns_daemon_dict):\n ConfigFileUtil().write(filename=pyswitchlib_ns_daemon_file, conf_dict=ns_daemon_dict, do_merge=False)\n else:\n try:\n os.unlink(pyswitchlib_ns_daemon_file)\n except:\n pass\n\n if uri:\n try:\n with Pyro4.Proxy(uri) as pyro_proxy:\n pyro_proxy.shutdown()\n pyro_proxy._pyroRelease()\n except:\n pass\n\n super(PySwitchLibApiDaemonRunner, self)._stop()", "def disconnect(self):\n pass", "def disconnect(self):\n pass", "def disconnect(self):\n pass", "def disconnect(self):\n pass", "def disconnect(self):\n pass", "async def on_disconnect(self) -> None:", "def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')", "def action_exit():\n modTimer.stop_all()\n mqtt_publish_lwt(iot.Status.OFFLINE)\n mqtt.disconnect()", "def disconnect(self):\r\n # TODO: Should destroy all instances which have been started dynamically\r\n raise NotImplementedError", "def perform_teardown():\n global credentials, connection, channel\n connection.close()", "def stopEngines():\n pass", "def test_disconnect_multiple(self):\n self.inverter.disconnect()\n self.inverter.disconnect() # Should not raise exception", "def disconnect(self):\n self.stop()\n self._send_command('exit')\n self.sock.close()\n self.disconnected = True", "def _disconnect(self):\n self.socket.send_json({\"worker_id\": self.socket_id, \"message\": \"disconnect\"})\n self.socket.close()\n self.context.term()\n exit()", "async def callback_homeassistant_stop(self, event: \"Event\") -> NoReturn:\n _LOGGER.debug(\"Hekr system is shutting down\")\n for device_id, device in self.devices.items():\n connector = device.connector\n listener = connector.listener\n if listener is not None and listener.is_running:\n _LOGGER.debug('Shutting down listener for device ID \"%s\"' % device_id)\n listener.stop()\n\n if connector.is_connected:\n _LOGGER.debug('Shutting down connector for device ID \"%s\"' % device_id)\n await connector.close_connection()", "def disconnect(self) -> None:\n try:\n self.s.close()\n except OSError as e:\n logging.exception(e)", "def cleanup():\n broadcast_proc.terminate()\n subprocess.call('sudo hciconfig hci0 noleadv', shell=True)\n if CELL:\n ser_command('Cell off', cell_ser)\n cell_ser.close()\n grovepi.digitalWrite(LED, 0)", "def shutdown(bot):\n setup(bot)", "def shutdown(self):\n\n self.active = False\n\n try:\n self.listen_socket.shutdown(socket.SHUT_RDWR)\n except:\n self.logger.info(\"Ignoring listen soc shutdown error\")\n self.listen_socket = None\n\n with self.connect_cv:\n self.connect_cv.notifyAll()\n\n self.wakeup()\n self.dbg_state = \"down\"", "async def disconnect(self):", "def daemonControlStop (self):\n self.stop()", "def plugin_shutdown():\n collectd.info('Shutting down collectd-mlab plugin.')", "def terminateDaemon( self ):\n self._connection.terminate()\n print \"'+++ JPy/sessionended/\"\n sys.stdout = self.stdout\n sys.stdin = self.stdin\n print \"deamon 
ended\\n\"\n sys.exit()", "def Shutdown(self):\n pass", "def Shutdown(self):\n pass", "def Stop_ePCSim(ePCSim_conn):\r\n ePCSim_conn.SendCmd(\"exit\")\r\n ePCSim_conn.prompt = \"#\"\r\n ePCSim_conn.SendCmd(\"pkill egate\")\r\n ePCSim_conn.SendCmd(\"pkill edaemon\")", "def tear_down_all(self):\n self.dut.send_expect(\"quit\", \"# \")\n time.sleep(2)\n self.dut.kill_all()", "def stop(self):\n shutdown_url = self._env[\"DATASTORE_HOST\"] + \"/shutdown\"\n req = urllib.request.Request(shutdown_url, method=\"POST\")\n urllib.request.urlopen(req)", "async def shutdown_gracefully(self) -> None:", "async def shutdown_gracefully(self) -> None:", "def shutdown(self):\n self._send_command('shutdown')\n self.sock.close()\n self.disconnected = True", "def shutdown_plugin(self):\n pass", "def _disconnect(self):\n self._factory.stopTrying()\n self._connection.disconnect()", "def stop(self) -> None:\n self.mqttc.disconnect()", "def cleanup_and_exit():\n logger.warn(\"Terminating the program\")\n try:\n for key in connections:\n try:\n connections[key].disconnect()\n except AttributeError:\n pass\n for s in sensors:\n try:\n sensors[s].cleanup()\n except AttributeError:\n pass\n except:\n pass\n sys.exit(0)", "def stop(self):\n self.disconnect()", "def stop(self):\n self.disconnect()", "def _plugin_stop(handle):\n GPIO.cleanup()\n _LOGGER.info('MAX31865 (async) Disconnected.')", "def teardown(self):\n self.loop.run_until_complete(self.prometheus_con.disconnect())", "def shutdown(self):\r\n self.socket.close()\r\n # self.socket_video.close()\r\n self.socket_state.close()", "def stop(self,dummy=None):\n if DEBUG: print \"Closing Python Manager\"\n self.flush_all()\n for key in self.connection_manager.peer_servers.keys():\n self.connection_manager.peer_servers[key].protocol.sendClose()\n for key in self.connection_manager.script_servers.keys():\n self.connection_manager.script_servers[key].protocol.sendClose()\n for key in self.connection_manager.data_gui_servers.keys():\n self.connection_manager.data_gui_servers[key].protocol.sendClose()\n self.close_all()\n #self.connection_manager.laud.loseConnection()\n if DEBUG: print self.connection_manager.is_connections_closed()\n reactor.stop()\n if DEBUG: print \"Done\"", "def shutdown(self):\n self.action('shutdown')", "def shutdown(self) -> None:\n pass", "def cleanupAtExit():\n \n global client\n \n client.stop()", "def stop(self):\n self._listeners = None\n\n try:\n if self._started_daemon:\n logging.info('Stopping Transmission daemon')\n exec_cmd(['transmission-remote', '--exit'], wait_after=2)\n\n except subprocess.CalledProcessError:\n logging.error('Unable to stop daemon')\n logging.debug('Error details', stack_info=True, exc_info=True)\n\n self._done = True", "def disconnect():\n logging.info('Client disconnected')", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.server.stop()\n self.driver.quit()", "def stop():\n rdebug(\"storpool-common.stop invoked\")\n\n rdebug(\"letting storpool-config know\")\n run_config.stop()", "def close(self):\n self.controller.DisableDevice()\n self.controller.StopPolling()\n self.controller.Disconnect(False)", "def disconnect(self):\n _abstract()", "def disconnect(self):\n _abstract()", "def disconnect(self):\n\n\t\tself.send(\"QUIT\", True)\n\n\t\tif self.s is not None:\n\t\t\tself.s.close()", "def Shutdown(self):\n self.conn.send(False)\n self.process.join()", "def stop(self):\n self.stopped = True\n self.broadcast('host down')", "def disconnect(self):\n # Nothing to do\n pass", "def cleanup(self):\n if 
self.vehicle:\n print(f\"Disconnecting from vehicle on {self._connection_string}\")\n self.vehicle.close()\n if self._sitl:\n print(\"Shutting down SITL instance\")\n self._sitl.stop()", "def __shut_down(self):\n\n for line, buses in self.__bus_dict.items():\n for bus in buses:\n bus.send_to_bus(\"Server Shut Down\")", "def _disconnect(self) -> None:\n self._agent.stop()\n self._agent_thread.join()", "def atexit(self):\n self.stop_listen()\n for driver in self.drivers.values():\n driver.stop()\n if hasattr(driver, \"atexit\"):\n driver.atexit()\n try:\n self.processor_thread.join()\n except AttributeError:\n pass", "def shutdown(self):\n raise NotImplementedError", "def terminate():\n leds.on()\n time.sleep(1)\n leds.off()\n\n GPIO.cleanup()", "def _shutdown(self, signum=None, frame=None):\n if not self.shutdown_requested:\n self.logger.info(\"shutdown initiated\")\n\n self._shutdown_requested = True\n if self.connected:\n self.disconnect()\n self.stop_listening()", "def tear_down(self):\n LOGGER.info('Tearing down plugin service...')\n self.reset()\n self.server.stop(grace=None)\n\n if self.device_proxy:\n self.device_proxy.tear_down()", "async def shutdown() -> None:\n await database.disconnect()", "def bcp_goodbye(self, **kwargs):\n if self.config['mediacontroller']['exit_on_disconnect']:\n self.socket_thread.sending_thread.stop()\n sys.exit()", "def shutup():\n try:\n ttsEng.shutup()\n except Exception, e:\n logging.error(e)", "def shutdown(self):\n\n if self.sessionState in (JT808SessionStates.OPEN,):\n self.log.warning(\"Shutdown requested...disconnecting\")\n self.disconnect()\n else:\n self.log.debug(\"Shutdown already in progress\")", "def disconnect():\n return c.close()", "def shutdown(self,):\n if hasattr(self, 'gpu_process_ids'):\n for ppid in self.gpu_process_ids:\n kill_proc_tree(ppid)\n for dispatcher in self.dispatchers:\n silence_function(1, dispatcher.shutdown)\n print('DistributedBackend shutdown.')", "def shutdown(self):\n print \"Plugin is shutting down.\"", "def __del__(self):\n self.shutdown()" ]
[ "0.67708665", "0.6769665", "0.6741867", "0.66888916", "0.66888916", "0.6571312", "0.65665215", "0.6500033", "0.64636064", "0.64133114", "0.6387295", "0.6361316", "0.63584465", "0.6352385", "0.6346766", "0.6346196", "0.6339963", "0.63314235", "0.63314235", "0.63314235", "0.6310234", "0.63076043", "0.6282716", "0.62790745", "0.62790096", "0.62705433", "0.6265983", "0.6265871", "0.62589806", "0.6241178", "0.6241178", "0.6241178", "0.6241178", "0.6241178", "0.6203971", "0.62030715", "0.6199466", "0.61959845", "0.6192926", "0.6191811", "0.6189165", "0.61840355", "0.61686563", "0.6161696", "0.61610687", "0.6160541", "0.61557364", "0.6148195", "0.61478555", "0.61372524", "0.6137132", "0.6128593", "0.6125277", "0.6125277", "0.6124497", "0.6121725", "0.6119041", "0.6117649", "0.6117649", "0.61163825", "0.6105023", "0.6097425", "0.60950065", "0.60940534", "0.6092371", "0.6092371", "0.6084466", "0.608349", "0.60823256", "0.60631365", "0.60629165", "0.6047397", "0.60452247", "0.6037044", "0.603304", "0.6021318", "0.60146683", "0.6012993", "0.600724", "0.600724", "0.6001238", "0.59986097", "0.59953946", "0.59909564", "0.59805524", "0.59770966", "0.5976663", "0.597399", "0.59671664", "0.59657985", "0.5946471", "0.5943132", "0.5933771", "0.5930338", "0.5928991", "0.59255075", "0.5924221", "0.59208184", "0.5912063", "0.59090465" ]
0.7881212
0
python ~/code/xdoctest/testing/test_linenos.py test_lineno_failcase_called_code
python ~/code/xdoctest/testing/test_linenos.py
def test_lineno_failcase_called_code():
    text = _run_case(utils.codeblock(
        r'''
        def func(a):
            """
            Example:
                >>> func(0)
                >>> # this doesnt do anything
                >>> print('this passes')
                this passes
                >>> # call the failing code
                >>> func(3)
            """
            if a > 0:
                nested_failure(a)
            return a

        def nested_failure(a):
            if a > 0:
                nested_failure(a - 1)
            else:
                raise Exception('fail case')
        '''))
    assert 'rel: 6, abs: 9,' in text
    assert text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_error(doctest):", "def test_expt(doctest):", "def test_exp(doctest):", "def testit(did_pass):\n\n # This function works correctly--it is verbatim from the text, chapter 6\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test_error_control(testdir):\n testdir.makepyfile(\"\"\"import sys\"\"\")\n result = testdir.runpytest('--pylint', '--pylint-error-types=EF')\n assert '1 passed' in result.stdout.str()", "def testit(did_pass):\n\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def _test():\n\n # perform doctest\n import sys\n import doctest\n\n doctest.testmod()\n\n sys.exit(0)", "def test_edge_cases(doctest):", "def test_expected_failures(modpath, expected_failure):\n code = os.path.dirname(expected_failure)\n retcode, out = flake8(join(modpath, expected_failure))\n assert retcode, \"expected failure (%s), got success\" % code\n needle = \": %s \" % code\n assert needle in out\n\n with open(os.path.join(modpath, expected_failure)) as f:\n doc = ast.get_docstring(\n ast.parse(f.read(), expected_failure),\n clean=True,\n )\n\n # keep \"literal\" lines, skip shell lines\n result_check = \"\".join(\n line + \"\\n\" for line in doc.splitlines() if line.startswith(\" RST\")\n )\n if result_check:\n modpath = os.path.join(modpath, \"\")\n assert out.replace(modpath, \" \") == result_check", "def _test():\n import doctest", "def test_basic(testdir):\n testdir.makepyfile(\"\"\"import sys\"\"\")\n result = testdir.runpytest('--pylint')\n assert 'Missing module docstring' in result.stdout.str()\n assert 'Unused import sys' in result.stdout.str()\n assert 'Final newline missing' in result.stdout.str()\n assert 'passed' not in result.stdout.str()", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number\r\n if did_pass:\r\n msg = \"Test at line {0} is ok\".format(linenum)\r\n else:\r\n msg = \"Test at line {0} is FAILED\".format(linenum)\r\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno\n if did_pass:\n msg = 'Test at line {0} ok.'.format(linenum)\n else:\n msg = 'Test at line {0} FAILED.'.format(linenum)\n print(msg)", "def _test():\n import doctest\n doctest.testmod(verbose=1)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)", "def test(cline):\n print(\"Running unit tests.\")\n cline.run(\"TF_CPP_MIN_LOG_LEVEL=3 python3 -m unittest\")", "def test_simple_pytest_import_error_cli():\n module_text = utils.codeblock(\n '''\n # There are lines before the bad line\n import os\n import sys\n import does_not_exist\n\n def module_func1():\n \"\"\"\n This module has a doctest\n\n Example:\n >>> print('hello world')\n \"\"\"\n ''')\n temp_module = util_misc.TempModule(module_text, modname='imperr_test_mod')\n command = sys.executable + ' -m pytest -v -s --xdoctest-verbose=3 
--xdoctest-supress-import-errors --xdoctest ' + temp_module.dpath\n print(command)\n print('--')\n info = cmd(command)\n print('--')\n # print('info = {}'.format(info))\n print(info['out'])\n # We patched doctest_example so it no longer outputs this in the traceback\n assert 'util_import' not in info['out']\n print(info['out'])\n # Note: flaky changes the return code from 1 to 3, so test non-zero\n assert info['ret'] != 0\n\n # Remove the supress import error flag and now we should get the traceback\n temp_module = util_misc.TempModule(module_text, modname='imperr_test_mod')\n command = sys.executable + ' -m pytest -v -s --xdoctest-verbose=3 --xdoctest ' + temp_module.dpath\n print(command)\n print('--')\n info = cmd(command)\n print('--')\n # print('info = {}'.format(info))\n print(info['out'])\n # We patched doctest_example so it no longer outputs this in the traceback\n assert 'util_import' in info['out']\n print(info['out'])\n # Note: flaky changes the return code from 1 to 3, so test non-zero\n assert info['ret'] != 0", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test_pylint_works(capsys: \"CaptureFixture\") -> None:\n # Pass one file with absolute path and the other one with relative path\n notebook1 = os.path.join(\"tests\", \"data\", \"notebook_for_testing.ipynb\")\n notebook2 = os.path.join(\"tests\", \"data\", \"notebook_with_indented_magics.ipynb\")\n\n main([\"pylint\", notebook1, notebook2, \"--disable=C0114\"])\n\n # check out and err\n out, _ = capsys.readouterr()\n\n expected_out = (\n \"************* Module tests.data.notebook_for_testing\\n\" # noqa: E501\n f\"{notebook1}:cell_2:19:8: C0303: Trailing whitespace (trailing-whitespace)\\n\" # noqa: E501\n f\"{notebook1}:cell_2:15:11: C0209: Formatting a regular string which could be a f-string (consider-using-f-string)\\n\" # noqa: E501\n f'{notebook1}:cell_4:1:0: C0413: Import \"from random import randint\" should be placed at the top of the module (wrong-import-position)\\n' # noqa: E501\n f'{notebook1}:cell_5:1:0: C0413: Import \"import pprint\" should be placed at the top of the module (wrong-import-position)\\n' # noqa: E501\n f'{notebook1}:cell_5:2:0: C0413: Import \"import sys\" should be placed at the top of the module (wrong-import-position)\\n' # noqa: E501\n f\"{notebook1}:cell_1:1:0: W0611: Unused import os (unused-import)\\n\" # noqa: E501\n f\"{notebook1}:cell_1:3:0: W0611: Unused import glob (unused-import)\\n\" # noqa: E501\n f\"{notebook1}:cell_1:5:0: W0611: Unused import nbqa (unused-import)\\n\" # noqa: E501\n f\"{notebook1}:cell_4:1:0: W0611: Unused randint imported from random (unused-import)\\n\" # noqa: E501\n f'{notebook1}:cell_4:1:0: C0411: standard import \"from random import randint\" should be placed before \"import nbqa\" (wrong-import-order)\\n' # noqa: E501\n f'{notebook1}:cell_5:1:0: C0411: standard import \"import pprint\" should be placed before \"import nbqa\" (wrong-import-order)\\n' # noqa: E501\n f'{notebook1}:cell_5:2:0: C0411: standard import \"import sys\" should be placed before \"import nbqa\" (wrong-import-order)\\n' # noqa: E501\n \"************* Module 
tests.data.notebook_with_indented_magics\\n\" # noqa: E501\n f\"{notebook2}:cell_1:1:0: W0611: Unused randint imported from random (unused-import)\\n\" # noqa: E501\n f\"{notebook2}:cell_1:2:0: W0611: Unused get_ipython imported from IPython (unused-import)\\n\" # noqa: E501\n f'{notebook2}:cell_3:3:0: C0411: standard import \"import operator\" should be placed before \"from IPython import get_ipython\" (wrong-import-order)\\n' # noqa: E501\n \"\\n\"\n \"-----------------------------------\\n\"\n \"Your code has been rated at 4.32/10\\n\"\n \"\\n\"\n )\n horizontal_bar = \"-----------------------------------\"\n assert out.split(horizontal_bar)[0] == expected_out.split(horizontal_bar)[0]", "def unitdoctest():\r\n\r\n pass", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} 
FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def _test(): # pragma: no cover\r\n print('Starting doctest')\r\n doctest.testmod()\r\n print('Completed doctest')", "def test_noop_function_call(self) -> None:\n before = after = \"\"\"\n for line in xreadlines(r):\n print(line)\n \"\"\"\n self.assertCodemod(before, after)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test_comparison_doctest(doctest):", "def def_textface_doctests():", "def _test():\n import doctest\n return doctest.testmod(verbose=True)", "def _test():\n import doctest\n return doctest.testmod(verbose=True)", "def test_basic():\n line = \"test\"\n assert wrap_line(line) == \"test\"", "def _test():\n import doctest\n doctest.testmod()", "def _test():\n import doctest\n doctest.testmod()", "def test_xfailed_but_passed():\n pass", "def test_doctests_run(self):\n results = doctest.testmod(lab, optionflags=TESTDOC_FLAGS, report=False)\n self.assertEqual(results[0], 0)", "def test_compiler_arguments_fist_line(patch, compiler, lines, tree):\n patch.init(StorySyntaxError)\n lines.last.return_value = None\n with raises(StorySyntaxError):\n compiler.arguments(tree, '0')\n error = 'arguments_noservice'\n StorySyntaxError.__init__.assert_called_with(error, tree=tree)", "def test_zero(doctest):", "def main():\n import argparse\n from xdoctest import utils\n description = utils.codeblock(\n '''\n discover and run doctests within a python package\n ''')\n\n parser = argparse.ArgumentParser(prog='python -m xdoctest', description=description)\n parser.add_argument('modname', help='what files to run')\n parser.add_argument('command', help='a doctest name or a command (list|all)', default='list')\n parser.add_argument(*('--style',), type=str, help='choose your style',\n choices=['auto', 'google', 'freeform'], default='auto')\n parser.add_argument(*('--options',), type=str,\n help='specify the default directive state',\n default=None)\n parser.add_argument(*('--offset',), dest='offset_linenos', action='store_true',\n help=('Doctest outputs will display line numbers '\n 'wrt to the source file.'))\n\n args, unknown = parser.parse_known_args()\n ns = args.__dict__.copy()\n\n # ... 
postprocess args\n modname = ns['modname']\n command = ns['command']\n style = ns['style']\n offset_linenos = ns['offset_linenos']\n\n if ns['options'] is None:\n from os.path import exists\n ns['options'] = ''\n if exists('pytest.ini'):\n from six.moves import configparser\n parser = configparser.ConfigParser()\n parser.read('pytest.ini')\n try:\n ns['options'] = parser.get('pytest', 'xdoctest_options')\n except configparser.NoOptionError:\n pass\n\n from xdoctest.directive import parse_directive_optstr\n default_runtime_state = {}\n for optpart in ns['options'].split(','):\n if optpart:\n directive = parse_directive_optstr(optpart)\n if directive is not None:\n default_runtime_state[directive.name] = directive.positive\n\n # Specify a default doctest_example.Config state\n config = {\n 'default_runtime_state': default_runtime_state,\n 'offset_linenos': offset_linenos,\n }\n\n import xdoctest\n xdoctest.doctest_module(modname, argv=[command], style=style,\n config=config)", "def _expected_lines_and_line_numbers(path, check_prefix):\n with open(path) as f:\n for index, line in enumerate(f):\n if 'RUN:' in line:\n # Ignore lit directives, which may include a call to\n # xctest_checker that specifies a check prefix.\n continue\n\n # Note that line numbers are not zero-indexed; we must add one to\n # the loop index.\n line_number = index + 1\n\n components = line.split(check_prefix)\n if len(components) == 2:\n yield (replace_offsets(components[1].strip(), line_number),\n line_number)\n elif len(components) > 2:\n # Include a newline, then the file name and line number in the\n # exception in order to have it appear as an inline failure in\n # Xcode.\n raise XCTestCheckerError(\n path, line_number,\n 'Usage violation: prefix \"{}\" appears twice in the same '\n 'line.'.format(check_prefix))", "def test_remainder(doctest):", "def test():\n import doctest\n doctest.testmod(verbose=0)\n test_int()\n test_tuple()", "def test_compiler_arguments_not_execute(patch, compiler, lines, tree):\n patch.init(StorySyntaxError)\n patch.object(Objects, 'arguments')\n lines.last.return_value = '1'\n lines.lines = {'1': {'method': 'whatever'}}\n with raises(StorySyntaxError):\n compiler.arguments(tree, '0')\n error = 'arguments_noservice'\n StorySyntaxError.__init__.assert_called_with(error, tree=tree)", "def test_noop_wrong_name(self) -> None:\n before = after = \"\"\"\n for line in r.xreadlines():\n print(line)\n \"\"\"\n self.assertCodemod(before, after)", "def scope_doctest():\n pass", "def test_compiler_indented_chain_first_line(patch, compiler, lines, tree):\n patch.init(StorySyntaxError)\n lines.last.return_value = None\n with raises(StorySyntaxError):\n compiler.indented_chain(tree, '0')\n error = 'arguments_nomutation'\n StorySyntaxError.__init__.assert_called_with(error, tree=tree)", "def test1(self):\n\n log.info('This is a test')\n self.assertTrue((random.randint(0,9) % 2) == 0)#! 
/usr/bin/env python", "def test_doc_code_cells(fname, globalns=globals()):\n text = Path(fname).read_text()\n code_cells = re.findall(r\"```{code-cell}[^\\n]+\\n(.*?)`{3}\", text, re.S)\n for cell in code_cells:\n header = re.search(r\"-{3}(.+?)-{3}\", cell, re.S)\n if header:\n cell = cell.replace(header.group(), \"\")\n if \"warns\" in header.group():\n with pytest.warns(None):\n exec(cell, globalns)\n continue\n if \"raises-exception\" in header.group():\n with pytest.raises(Exception):\n exec(cell, globalns)\n continue\n exec(cell, globalns)", "def test_expected_successes(modpath):\n retcode, out = flake8(join(modpath, \"test_cases\"))\n assert not retcode, out", "def test_0_check_xc_docstring(self):\n self.banner(\"Checking the docstring on your extra credit.\") \n filename = self.find_file('project9_xc.py')\n self.check_docstring(filename)", "def test_start_of_line_2(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.1\", \"3.1\"),\n after_sel=(\"3.4\", \"3.4\"),\n command_name=\"start-of-line\",\n )", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 11)", "def test_main():\n # Setup\n # Exercise\n # Verify", "def test_next_line(self):\n before_b = \"\"\"\\\n a\n\n b\n \"\"\"\n after_b = \"\"\"\\\n a\n\n b\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.1\", \"1.1\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"next-line\",\n )", "def test_T01():", "def test_4():", "def test_line_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[0], 4)", "def test_3():", "def test_5():", "def test_for_mistake(func, *args, **kw):\n\n global global_fail_counter\n global global_success_counter\n\n # print test number\n test_num = global_fail_counter + global_success_counter\n print('Test # %d: ' % test_num, end='')\n #print('Test # %d: ' % test_num)\n\n # Run function\n obj = func(*args, **kw)\n # Increment appropriate counter\n if obj.mistake:\n global_fail_counter += 1\n else:\n global_success_counter += 1", "def test_start_of_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.10\", \"3.10\"),\n after_sel=(\"3.4\", \"3.4\"),\n command_name=\"start-of-line\",\n )", "def theTests(path_to_code_to_check=\"../me\"):\n print(\"\\nWelcome to the exam!\")\n print(\"May the odds be ever in your favour.\\nEspecially today!\")\n\n if ex_runs(path_to_code_to_check, exerciseNumber=1, weekNumber=SET_NUMBER):\n exam = loadExerciseFile(\n path_to_code_to_check, weekNumber=SET_NUMBER, exerciseNumber=1\n )\n\n # testResults.append(test(test_flake8(ex1path), \"pass the linter\"))\n exam_test(\n True,\n [],\n exam.string_please,\n finishing_function=lambda x: type(x) is str,\n extra_message=\"Don't over think this! just return a string!\",\n )\n exam_test(\n True,\n [],\n exam.list_please,\n finishing_function=lambda x: type(x) is list,\n extra_message=\"Don't over think this! just return a list!\",\n )\n exam_test(\n True,\n [],\n exam.dictionary_please,\n finishing_function=lambda x: type(x) is dict,\n extra_message=\"Don't over think this! 
just return a dictionary!\",\n )\n exam_test(True, [5], exam.is_it_5)\n exam_test(False, [4], exam.is_it_5)\n exam_test(False, [\"cats\"], exam.is_it_5)\n exam_test(0, [5], exam.take_five)\n exam_test(5, [10], exam.take_five)\n exam_test(-5, [0], exam.take_five)\n\n exam_test(\"Hello the Queen\", [\"the Queen\"], exam.greet)\n exam_test(\"Hello Pr♂nc♀♂\", [\"Pr♂nc♀♂\"], exam.greet)\n\n exam_test(4, [[3, 3, 3, 3, 1]], exam.three_counter)\n exam_test(0, [[0, 1, 2, 5, -9]], exam.three_counter)\n\n exam_test(2, [7], exam.n_counter)\n exam_test(5, [0, [0, 0, 0, 0, 0, [0]]], exam.n_counter)\n\n # fmt: off\n fizza = [\n 1, 2, \"Fizz\", 4, \"Buzz\", \"Fizz\", 7, 8, \"Fizz\", \"Buzz\", 11, \"Fizz\", \n 13, 14, \"FizzBuzz\", 16, 17, \"Fizz\", 19, \"Buzz\", \"Fizz\", 22, 23, \n \"Fizz\", \"Buzz\", 26, \"Fizz\", 28, 29, \"FizzBuzz\", 31, 32, \"Fizz\", 34, \n \"Buzz\", \"Fizz\", 37, 38, \"Fizz\", \"Buzz\", 41, \"Fizz\", 43, 44, \n \"FizzBuzz\", 46, 47, \"Fizz\", 49, \"Buzz\", \"Fizz\", 52, 53, \"Fizz\", \n \"Buzz\", 56, \"Fizz\", 58, 59, \"FizzBuzz\", 61, 62, \"Fizz\", 64, \"Buzz\", \n \"Fizz\", 67, 68, \"Fizz\", \"Buzz\", 71, \"Fizz\", 73, 74, \"FizzBuzz\", 76, \n 77, \"Fizz\", 79, \"Buzz\", \"Fizz\", 82, 83, \"Fizz\", \"Buzz\", 86, \"Fizz\", \n 88, 89, \"FizzBuzz\", 91, 92, \"Fizz\", 94, \"Buzz\", \"Fizz\", 97, 98, \n \"Fizz\", \"Buzz\", ]\n # fmt: on\n exam_test(fizza, [], exam.fizz_buzz)\n\n exam_test(\n \"|a| |s|e|r|i|a|l| |k|i|l|l|e|r|\", [\"a serial killer\"], exam.put_behind_bars\n )\n exam_test(\"|a| |b|a|r|t|e|n|d|e|r|\", [\"a bartender\"], exam.put_behind_bars)\n\n exam_test([\"red fox\"], [\"x\"], exam.pet_filter)\n exam_test([], [\"q\"], exam.pet_filter)\n exam_test(\n [\"pig\", \"sheep\", \"guinea pig\", \"pigeon\", \"alpaca\", \"guppy\"],\n [\"p\"],\n exam.pet_filter,\n )\n\n exam_test(\"e\", [], exam.best_letter_for_pets)\n\n word_lengths = [[3, 3, 3], [4, 4, 4], [5, 5, 5], [6, 6, 6], [7, 7, 7]]\n exam_test(\n word_lengths,\n [],\n exam.make_filler_text_dictionary,\n lambda x: [[len(w) for w in x[k]] for k in x.keys()],\n )\n\n exam_test(\n True,\n [50],\n exam.random_filler_text,\n lambda x: len(x.split(\" \")) == 50 and len(x) > 3 * 50,\n )\n\n exam_test(\n True,\n [1000],\n exam.random_filler_text,\n lambda x: len(x.split(\" \")) == 1000 and len(x) > 3 * 1000,\n )\n\n clean_out_old_env()\n\n exam_test(\n True,\n [100],\n exam.fast_filler,\n lambda x: len(x.split(\" \")) == 100 and len(x) > 3 * 100,\n # chdir=True, # NFI what this does :(\n )\n\n # exam_test(True, [\"./week8/dict_racey.json\"], os.path.exists)\n\n exam_test(\n True,\n [10],\n exam.fast_filler,\n lambda x: x[0] in string.ascii_uppercase and x[1] in string.ascii_lowercase,\n \"Test if fast_filler is capitalised\",\n )\n exam_test(\n True,\n [10],\n exam.fast_filler,\n lambda x: x[-1] == \".\",\n \"Test if fast_filler finishes with a .\",\n )\n\n print(\n \"The point of saving the dictionary is that it's fast!\",\n \"The pattern of saving a value locally so that you don't\",\n \"need to go and get it is called caching.\",\n \"This test runs fast_filler 10 times, and if it manages it in less\",\n \"than a second, then you're good to go!\",\n sep=\"\\n\",\n )\n try:\n TIMEOUT_IN_SECONDS = 1\n func_timeout(\n TIMEOUT_IN_SECONDS,\n lambda: [exam.fast_filler(1000) for _ in range(10)],\n args=[],\n )\n testResults.append(test(True, \"subsequent fast_filler\"))\n except FunctionTimedOut as t:\n m = (\n \"Timed out trying to run fast filler 10 times in 1 second, \"\n \"subsequent fast_filler probably wasn't fast enough\"\n )\n 
print(m, str(t))\n testResults.append(test(False, m + str(t)))\n except Exception as e:\n testResults.append(test(False, \"subsequent fast_filler failed: \" + str(e)))\n\n message = (\n \"Cowabunga! You've got all the tests passing!\\n\"\n \"Well done, that's all the exercises for this term out of the way!\"\n )\n print(testResults)\n return finish_up(testResults, message, nyan_cat())", "def test_previous_line(self):\n before_b = \"\"\"\\\n a\n\n b\n \"\"\"\n after_b = \"\"\"\\\n a\n\n b\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.0\", \"3.0\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"previous-line\",\n )", "def test_passed():\n pass", "def test():\n import doctest\n from . import locate\n return doctest.testmod(locate)", "def test_sqrt(doctest):", "def test_03_pass(self):\n if x==1:\n pass", "def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_twentythree_no_args():\n sys.argv = ['test']\n with pytest.raises(SystemExit):\n TwentyThree()", "def test_shift(doctest):", "def test_move_lines_up_into_docstring(self):\n before_b = '''\\\n #@@language python\n def test():\n \"\"\" a\n b\n c\n \"\"\"\n print 1\n \n print 2\n '''\n after_b = '''\\\n #@@language python\n def test():\n \"\"\" a\n b\n c\n print 1\n \"\"\"\n \n print 2\n '''\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.1\", \"7.1\"),\n after_sel=(\"6.1\", \"6.1\"),\n command_name=\"move-lines-up\",\n )", "def test_abs(doctest):", "def test_examples():\n argv = [\"py.test\", \"-examples\"]\n assert get_sargs(argv) is None", "def test_compiler_return_statement(patch, compiler, lines, tree):\n tree.expression = None\n compiler.return_statement(tree, '1')\n line = tree.line()\n kwargs = {'args': None, 'parent': '1'}\n lines.append.assert_called_with('return', line, **kwargs)", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)", "def __integration_doctest():\n pass", "def test_02_pass(self):\n if x==1:\n pass", "def lloc():\n load_env_vars('dev')\n from tools.static_code_analysis import LogicalLinesOfCode\n radon_raw = LogicalLinesOfCode()\n score = radon_raw.run_test()\n radon_raw.create_badge(score)", "def test_compiler_indented_chain_not_mutation(patch, compiler, lines, tree):\n patch.init(StorySyntaxError)\n patch.object(Compiler, 'chained_mutations')\n lines.last.return_value = '1'\n lines.lines = {'1': {'method': 'whatever'}}\n with raises(StorySyntaxError):\n compiler.indented_chain(tree, '0')\n error = 'arguments_nomutation'\n StorySyntaxError.__init__.assert_called_with(error, tree=tree)", "def test_py_compile_basic(self):\n self._test_py_compile('basic')", "def test_main_succeed_en(runner: CliRunner) -> None:\n result = runner.invoke(__main__.main, \"-c tests/clippings-en.txt\")\n assert result.exit_code == 0", "def main(argv=None):\n if argv is None:\n argv = sys.argv\n\n import doctest\n verbose = (len(argv) >= 2 and argv[1] == \"--Test\")\n if verbose:\n print(\"Running doctests (verbose mode)\")\n else:\n print(\"Running doctests\")\n doctest.testmod(verbose=verbose)\n return 0", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def test_kill_line_start_line(self):\n before_b = \"\"\"\\\n line 1\n line 2\n line 3\n line 4\n \"\"\"\n after_b = \"\"\"\\\n line 1\n line 
2\n\n line 4\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.0\", \"3.0\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"kill-line\",\n )", "def test_get_lines(self):\r\n \r\n self.assertEqual(get_lines(\"test_20191206_1140000.txt\", os.getcwd()),\r\n 299,\r\n \"Should be equal number of lines.\")", "def test_pylintrc_file(testdir):\n rcfile = testdir.makefile('rc', \"\"\"\n[FORMAT]\n\nmax-line-length=3\n\"\"\")\n testdir.makepyfile(\"\"\"import sys\"\"\")\n result = testdir.runpytest(\n '--pylint', '--pylint-rcfile={0}'.format(rcfile.strpath)\n )\n assert 'Line too long (10/3)' in result.stdout.str()", "def test(self):\n self.skipped_test('doctest module has no DocTestSuite class')", "def test_beginning_of_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.10\", \"3.10\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"beginning-of-line\",\n )" ]
[ "0.704092", "0.70044655", "0.6639948", "0.6638953", "0.65686834", "0.65359443", "0.6508109", "0.6495619", "0.643054", "0.6405408", "0.63980496", "0.63929", "0.6392692", "0.63884795", "0.63859123", "0.63859123", "0.6350053", "0.6331821", "0.6329094", "0.6329094", "0.63262904", "0.63060284", "0.6253524", "0.6253524", "0.6253524", "0.6253524", "0.6253524", "0.6253524", "0.6253524", "0.62514967", "0.6249631", "0.6249631", "0.6249631", "0.6249631", "0.6249631", "0.6249001", "0.62446743", "0.6241719", "0.62371904", "0.6230489", "0.6214128", "0.6214128", "0.61893773", "0.6179329", "0.6179329", "0.61680174", "0.61649674", "0.6147933", "0.6141428", "0.6136835", "0.61358947", "0.6129527", "0.6090829", "0.6072843", "0.6069914", "0.60606474", "0.60161585", "0.6010203", "0.59641236", "0.5962467", "0.5948293", "0.5937274", "0.5915456", "0.589643", "0.58834976", "0.5881341", "0.58642316", "0.58549494", "0.58529156", "0.58365774", "0.5833128", "0.58308595", "0.5805898", "0.57914305", "0.57837254", "0.578372", "0.57814515", "0.576918", "0.5768244", "0.5765805", "0.57635105", "0.5763297", "0.57630616", "0.57611245", "0.5760603", "0.5729843", "0.5729843", "0.57277656", "0.5723648", "0.57130367", "0.57070506", "0.56994814", "0.5695621", "0.56912017", "0.56866896", "0.56841797", "0.5675818", "0.56715494", "0.56694996", "0.56670964" ]
0.7327849
0
Add to the list of describing adjectives.
def add_adjectives(self, adjective):
    self.adjectives += [adjective]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_adjectives(self, *sAdjs):\n self.adjectives += list(sAdjs)", "def add(self):\n pass", "def add_many_descriptors(self, descriptors):", "def add(self, PlugLead):\n\n self.check_conflicts(PlugLead)\n self.plugleads.append(PlugLead)", "def add_disease(self, disease):\n self.diseases.append(disease)", "def addObjective(self, *args):\n return _libsbml.ListOfObjectives_addObjective(self, *args)", "def add_experience(self, state, action, reward, next_state, done):\n experience = (state, action, reward, next_state, done)\n for i, k in enumerate(self.data_keys):\n getattr(self, k).append(experience[i])\n self.size += 1", "def add_diameter(self, dia):\n self.diameters.append(dia)", "def addEntry(self, listDictions):\n ## load xml\n improvDoc = loadIMProvFile(self.argsFile)\n entrname= 'Job'\n for dictions in listDictions:\n report = IMProvNode(entrname , None, **dictions)\n improvDoc.addNode(report)\n outfile = file( self.argsFile, 'w').write(str(improvDoc))\n return", "def Add(self, *args):\n return _BRepAlgo.BRepAlgo_AsDes_Add(self, *args)", "def add(self, *args):\n pass", "def add(self, *args):\n pass", "def AddConcept(self, concept):\n self.concepts.append(concept)", "def add_descriptor(self, descriptor):", "def do_add(self):\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['AddingVariablesList']))\n\t\t'''\n\n\t\t#Apply\t\n\t\tself.map('append',map(\n\t\t\t\t\t\t\t\t\tlambda __AddingVariable:\n\t\t\t\t\t\t\t\t\t{'LiargVariablesList':[__AddingVariable]},\n\t\t\t\t\t\t\t\t\tself.AddingVariablesList\n\t\t\t\t\t\t\t\t)\n\t\t\t\t)", "def addDemographics(self):\n p = self.p\n demographics_data = {\n 'dob': p.dob,\n 'gender': p.gender,\n 'email': p.email,\n 'fname': p.fname,\n 'lname': p.lname,\n 'hphone': p.home,\n 'cphone': p.cell,\n 'country': p.country,\n 'city': p.city,\n 'pcode': p.pcode,\n 'region': p.region,\n 'street': p.street,\n }\n self.demographics_doc = DEMOGRAPHICS.sub(demographics_data).done()", "def add(self, name, content):\n raise NotImplementedError", "def add(self, el):\n raise Exception('TODO IMPLEMENT ME !')", "def add_tag(self, tag):\n\n # directional relation: tag is the blank of everything in the list\n self.relations[tag] = {\n \"overlord\": [],\n \"hegemon\": [], # for tributary\n \"tributary\": [],\n \"vassal\": [],\n \"guaranteeing\": [],\n \"guarantor\": [],\n \"alliance\": [],\n \"senior\": [],\n \"junior\": [],\n \"marriage\": []\n }", "def add_condiments(self):\n print(\"Adding Lemon\")", "def add_food(self, _food):\n self.food.append(_food)", "def add_objective(self, objective):\n self.objectives.append(objective)", "def __add__(self, notes):\n self.add_notes(notes)\n return self", "def add_emb(self, emb):\n self.embs.append(emb)", "def add(self, *items):", "def append(self, dpr):\r\n self.childlist.append(dpr)", "def add(self, experience):\n self.buffer.append(experience)", "def add_descriptions_to_confusion_matrix(self):\n topic_names = []\n for topic_num in self.topic_numbers:\n topic_names.append(self.topic_names[topic_num])\n for index, row in enumerate(self.confusion_matrix):\n row.insert(0,topic_names[index])\n topic_names_for_matrix = topic_names.copy()\n topic_names_for_matrix.insert(0,\"\")\n self.confusion_matrix.insert(0,topic_names_for_matrix)\n self.confusion_matrix_true.insert(0,topic_names_for_matrix)", "def to_add(self):\n pass", "def add(self, item):", "def add_description(self, description):\n self.quest_node['description'] = description\n self.description = description\n graph.push(self.quest_node)", "def add_animals(self, *args):\n 
if self.validate_requirements(args):\n [self.animals.append(arg) for arg in args]\n else:\n print(\"foobar\")", "def EventContentMissionExcelAddDescription(builder, Description):\n return AddDescription(builder, Description)", "def add_description(self, desc):\n self.description = desc", "def add_advices_to_user(self, id_user):\n # get data\n advice_type_id = RobotAdviceType.objects.values_list(\"id\").get(type=\"default\")\n advices_id = RobotAdvices.objects.values_list(\"id\").filter(robot_advice_type=advice_type_id)\n\n # add new advices to user\n for advice_id in advices_id:\n advice = RobotAdvices.objects.get(id=advice_id[0])\n user = self.user.objects.get(id=id_user)\n AdvicesToUser.objects.create(user=user, advice=advice)", "def add(lst):\n # TODO", "def _add_descriptors(related):\n\n for r in related:\n r[\"descriptors\"] = []\n for edge in G.edges(data=True):\n sibling_idx = _get_connected(edge, r[\"tokenIndex\"])\n if sibling_idx and (A.lookup[int(sibling_idx)][\"pos\"] == \"JJ\" or edge[2][\"dep\"] in [\"amod\", \"compound\"]):\n r[\"descriptors\"].append(\n {\n \"tokenIndex\": sibling_idx,\n \"rawName\": A.lookup[int(sibling_idx)][\"word\"]\n }\n )\n\n if sibling_idx and \"NN\" in A.lookup[int(sibling_idx)][\"pos\"] and \"amod\" in edge[2][\"dep\"]:\n additional_related = _get_cousin(sibling_idx, [\"nmod\"])\n for add in set(additional_related):\n related = _add_related(add, \"nmod\", related, A.index_lookup[add],\n connector=G.nodes[sibling_idx]['word'])\n return related", "def adjoint(self, add, model, data):\n self.checkDomainRange(model, data)\n self.ops[0].adjoint(add, model, data.vecs[0])\n for idx in range(1, self.n):\n self.ops[idx].adjoint(True, model, data.vecs[idx])", "def add(self, element):\n pass", "def add(self, Links__=None, AllLinks=None, Enabled=None, Name=None, Priority=None):\n # type: (List[str], bool, bool, str, int) -> Profile\n return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))", "def add(self):\n return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))", "def add(self, optim=None, more_data=[]):\r\n raise NotImplementedError()", "def add(self, optim=None, more_data=[]):\r\n raise NotImplementedError()", "def add(self, product):\n pass", "def add(self, *args):\n return _libsbml.ListWrapperCVTerm_add(self, *args)", "def add_food_to_bag(self):\n self.food_eaten.set(sum([species.food.get() for species in self.ecosystem]))", "def create_alternatives(self, alternatives, question):\n\n logging.info(f\"Alternativas: {alternatives}\")\n\n for alternative in alternatives:\n result = Alternative.objects.create(\n **alternative,\n question=question\n )\n question.alternatives.add(result)", "def add_exercise( self, exercise ):\n self.exercises.append( exercise )", "def addPersonality(self):\n\t\tself.personality += 1\n\t\tif self.personality > 10:\n\t\t\tself.personality = 10", "def __add__(self, element):\r\n self.elements += element", "def addProblems(self):\n if self.pid in Problem.problems: \n for prob in Problem.problems[self.pid]:\n subs = {'end': {'end': '2010-09-13'}}\n self._set_default_attrs(prob, subs)\n prob_string = PROBLEM.sub({\n 'onset':prob.start,\n 'resolution':prob.end,\n 'snomed':prob.snomed, \n 'name':prob.name\n }).done()\n self.data.append(SDMX.sub({'models':prob_string}, escape=False).done())", "def __add__(self, other: Union['_Imagine', '_ImagineMany']) -> '_ImagineMany':\n return _ImagineMany(self, other)", "def addQuestion(self):\n self.questions.append(Question(self))", "def add_deputy(self, deputy):\n 
self.deputies.append(deputy)", "def add_detachment(self):\n print(\"Which detachment would you like to add?\")\n for index, keys in enumerate(init.detachments_dict.keys()):\n print(str(index + 1) + '. ' + keys)\n # get input to decide which detachments to add\n user_input = input(\">> \")\n\n # allows users to add multiple detachments at once\n user_input = re.findall(r'[0-9]+|[a-zA-Z]+', user_input)\n for i in user_input:\n if i.isdigit():\n i = list(init.detachments_dict.keys())[int(i) - 1]\n print(\"Adding {} to army\".format(i))\n detach = Detachment(i)\n\n # populate compulsory slots\n for keys, values in detach.units_dict.items():\n if keys == \"Dedicated Transports\":\n continue\n while len(values) < detach.foc[keys][0]:\n print(\"***Adding compulsory units from \" + keys + \"***\")\n unit = self._create_user_unit(keys)\n self._add_unit(detach, unit)\n\n self.army.add_detachment(detach)\n return", "def add_architectural_elements(conn, cur, objects):\n \n print 'Adding architectural elements...',\n \n sift_detector = cv2.FeatureDetector_create('SIFT')\n sift_descriptor = cv2.DescriptorExtractor_create('SIFT')\n \n feature_id = 1\n \n for i, object in enumerate(objects):\n \n cur.execute('INSERT INTO views VALUES (\"{_id}\", \"{task_id}\", \"{name}\")'.format(\n _id = i+1,\n task_id = object['task_id'],\n name = object['view_name'],\n )\n )\n \n img = cv2.imread(VIEWS_PATH + object['view_name'])\n \n keypoints = sift_detector.detect(img)\n features = sift_descriptor.compute(img, keypoints)\n \n for keypoint, feature in zip(features[0], features[1]):\n \n cur.execute('INSERT INTO features VALUES (\"{_id}\", \"{view_id}\", \"{pt_x}\", \"{pt_y}\", \"{desc}\")'.format(\n _id = feature_id,\n view_id = i+1,\n pt_x = keypoint.pt[0],\n pt_y = keypoint.pt[1],\n desc = str(list(feature))[1:-1].replace('.0', '').replace(' ', '')\n )\n )\n \n feature_id += 1\n \n print 'done.'", "def add(self, *drawables):\n self.drawables.extend(drawables)", "def add(self, specification):\n self.specifications.append(specification)", "def add(self, obj):\n raise NotImplementedError", "def add_many(self, pair_list):\n\n for pair in pair_list:\n plug = PlugLead(pair)\n self.check_conflicts(plug)\n self.plugleads.append(plug)", "def add_dut(self):\n pass", "def add_words(self, words):\r\n for word in words:\r\n self.add(word)", "def appendFrom(self, *args):\n return _libsbml.ListOfObjectives_appendFrom(self, *args)", "def add():\n pass", "def add_item(self, command, description):\n self.append((command, description))", "def add_advice_to_user_created(cls, user, list_advice_id):\n for id_advice in list_advice_id:\n advice = RobotAdvices.objects.get(id=id_advice)\n AdvicesToUser.objects.create(user=user, advice=advice)", "def addModifiers(self):\n return _libsbml.Model_addModifiers(self)", "def add_paragraph_option():\n extra_content_lst = []\n paragraph = input('Do you want to add another paragraph to your website? [yes]')\n while paragraph == 'yes' or paragraph == '':\n title = input('Title of your paragraph?')\n content = input('Content of your paragraph (single line)')\n images = image_option()\n extra_content_lst.append((ContentInfo(title, content, images, None)))\n paragraph = input('Do you want to add another paragraph to your website? 
[yes]')\n\n return extra_content_lst", "def apilar(self,dato):\r\n\t\tself.elementos.append(dato)\r\n\t\tself.len += 1", "def append_collectable(self, newnotes: List):\n self.notes.extend(newnotes)", "def add(self, game_obj):\r\n self.game_objects_for_adding.append(game_obj)", "def add_AdDescription(self, url, description):\n layout = BoxLayout(size_hint_y = 4)\n layout.add_widget(AsyncImage(source = url))\n layout.add_widget(Label(text = description))\n self.layout.add_widget(layout)", "def add(element):", "def addKeyWord(self, kWord):\n #kWord.printKeyWord()\n self.sentence.append(kWord)", "def add(self, iterable):\n raise NotImplementedError()", "def add(self, elem):", "def add(self, elem):", "def add_descriptors(self, mapping):\n for key, desc in mapping.iteritems():\n self.descriptors[int(key, 16)] = desc", "def add_descriptors(self, mapping):\n for key, desc in mapping.iteritems():\n self.descriptors[int(key, 16)] = desc", "def add(self, name, value) -> None:\n ...", "def __add__(self, other: Union[_Imagine, '_ImagineMany']) -> '_ImagineMany':\n return _ImagineMany(self, other)", "def add(self, die_):\n self._dice.append(die_)", "def add(self, sentence):\n self._sentences.add(sentence)", "def add_D(self, D):\n self.Ds.append(D)", "def append(self, forms, lemmas, cpostags, postags, feats, heads, deprels):\n self.__init__(\n self._forms + list(forms),\n self._lemmas + list(lemmas),\n self._cpostags + list(cpostags),\n self._postags + list(postags),\n self._feats + list(feats),\n self._heads + list(heads),\n self._deprels + list(deprels)\n )", "def add(self, cards):\n\n super().add(cards)\n self._update_value()", "def add_discount(self, discount):\n self.discounts.append(discount)", "def add_questions(self, questions):\n for question in questions:\n self.questions.append(question)", "def add_content(self, more_content, no_docstring=False):\r\n # MatAttributeDocumenter.add_content(self, more_content,\r\n # no_docstring=True)\r\n MatAttributeDocumenter.add_content(self, more_content, no_docstring)", "def add(self, item: Any) -> None:\n pass", "def descriptions(self, descriptions):\n\n self._descriptions = descriptions", "def __init__(self, description):\n self.contents = HashMap()\n for gdl in description:\n if not self.contents.containsKey(key):\n self.contents.put(key, ArrayList())\n self.contents.get(key).add(rule)", "def register_adhocs(self):\n aboutform = self.plugin['xep_0004'].makeForm('form', \"About SleekBot\")\n aboutform.addField('about', 'fixed', value= self.__doc__)\n self.plugin['xep_0050'].addCommand('about', 'About Sleekbot', aboutform)\n pluginform = self.plugin['xep_0004'].makeForm('form', 'Plugins')\n plugins = pluginform.addField('plugin', 'list-single', 'Plugins')\n for key in self.cmd_plugins:\n plugins.addOption(key, key)\n plugins = pluginform.addField('option', 'list-single', 'Commands')\n plugins.addOption('about', 'About')\n #plugins.addOption('config', 'Configure')\n self.plugin['xep_0050'].addCommand('plugins', 'Plugins', pluginform, self.form_plugin_command, True)", "def add(self, experience: []):\n if len(self.buffer) + len(experience) >= self.buffer_size:\n self.buffer[0:1] = []\n self.buffer.append(experience)", "def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True", "def add(self, states, actions, rewards, next_states, dones):\n assert len(states) == self.num_agents, 'ERROR> group states size mismatch'\n assert 
len(actions) == self.num_agents, 'ERROR> group actions size mismatch'\n assert len(rewards) == self.num_agents, 'ERROR> group rewards size mismatch'\n assert len(next_states) == self.num_agents, 'ERROR> group next states size mismatch'\n assert len(dones) == self.num_agents, 'ERROR> group dones size mismatch'\n\n experience = (states, actions, rewards, next_states, dones)\n self.memory.append(experience)", "def _add_meta(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def get_adjectives(self):\n random.shuffle(self.adjectives)\n return self.adjectives", "def __add__(self,_VariablesList):\n\n\t\t#Call the add method\n\t\tself.add(_VariablesList)\n\n\t\t#Return \n\t\treturn self", "def add(self, d):\n new = self.copy()\n new.update(d)\n return new" ]
[ "0.7263275", "0.62598264", "0.5947216", "0.57094675", "0.5695133", "0.5606961", "0.56066847", "0.5586834", "0.5575344", "0.5548141", "0.5520845", "0.5520845", "0.55087703", "0.5452977", "0.5440875", "0.53886217", "0.5388016", "0.5352896", "0.5326072", "0.5325428", "0.5324635", "0.5306004", "0.5275961", "0.5274832", "0.52725625", "0.52449703", "0.522418", "0.52171415", "0.52053314", "0.5205009", "0.5202241", "0.51998526", "0.5199701", "0.51987034", "0.5193855", "0.5191535", "0.51829636", "0.5177303", "0.51657784", "0.5130658", "0.5126315", "0.51199913", "0.51199913", "0.5113955", "0.5113316", "0.5103743", "0.5102698", "0.5098974", "0.50988907", "0.50975025", "0.50972795", "0.5086273", "0.507103", "0.5067208", "0.50651103", "0.5062401", "0.5060921", "0.50474197", "0.50471115", "0.5035069", "0.5032484", "0.503024", "0.503004", "0.5028697", "0.50273687", "0.50254565", "0.5023989", "0.50236714", "0.50146836", "0.49945998", "0.49920094", "0.49913198", "0.49905953", "0.49824607", "0.49759683", "0.49692625", "0.49692625", "0.4967445", "0.4967445", "0.49640262", "0.4960409", "0.49577758", "0.49571782", "0.4950486", "0.49419475", "0.494125", "0.4939759", "0.49387482", "0.49319613", "0.493164", "0.49288365", "0.49266714", "0.49256387", "0.49239784", "0.49219733", "0.49213344", "0.49208766", "0.49206114", "0.49155834", "0.4911131" ]
0.7504388
0
Returns the list of describing adjectives. The list is shuffled first because generally this is used to get a random adjective.
def get_adjectives(self):
    random.shuffle(self.adjectives)
    return self.adjectives
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_adjectives(lyrics):\n doc = nlp(lyrics.lower())\n all_adjectives = [token.lemma_ for token in doc if token.pos_ == \"ADJ\"]\n return all_adjectives", "def succ(self):\n return [ self.simple_reflection(i) for i in self.descents(positive=True) ]", "def getAdjectives(self, word):\n\t\tadjectives = set()\n\t\tfor synset in wordnet.synsets(word):\n\t\t\tif synset.pos == ADJ:\n\t\t\t\tfor synonym in synset.lemma_names:\n\t\t\t\t\tadjectives.add(synonym)\n\t\treturn adjectives", "def adjectives_sorted(lyrics):\n adjectives = get_adjectives(lyrics)\n sorted_adjectives = Counter(adjectives)\n return sorted_adjectives", "def desc_with_default(self) -> List[str]:\n return self.desc[:]", "def pred(self):\n return [ self.simple_reflection(i) for i in self.descents() ]", "def getAllDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.extend(worldItems[item][DESCWORDS])\r\n return list(set(descWords))", "def _read_advantages(root):\n output_list = []\n for _, value in enumerate(root[0][3]):\n output_list.append(Advantage(value))\n return output_list", "def get_advice():\n json_response = random_adviceslip()\n advice = parse_advice(json_response=json_response)\n return advice", "def get_objectives(self):\n return copy.deepcopy(self.objectives), self.gates_names", "def get_advisories(self):\n\n advisories = []\n\n for i in range(len(self.__data['advisories'])):\n data = requests.get(self.__data['advisories'][i]['links']['self']['href'], headers=getHeaders()).json()\n this = {}\n this['id'] = data['id']\n this['name'] = data['name']\n advisories.append(this)\n\n return advisories", "def get_ads():\n return coll_ad.distinct(KEY_AD_ID)", "def disease_descriptors(civic_did8):\n return [civic_did8]", "def _choose_babble_phrases(self) -> tuple:\n noun_choices = ('singular nouns', 'plural nouns')\n noun_choice = self.random_element(noun_choices)\n\n adjective_choices = (\n 'adjectives starting with consonant',\n 'adjectives starting with vowel')\n\n if noun_choice == 'singular nouns':\n article_choice = self.random_element(self.article_choices)\n else:\n article_choice = 'the'\n\n if article_choice == 'an':\n adjective_choice = 'adjectives starting with vowel'\n elif article_choice == 'a':\n adjective_choice = 'adjectives starting with consonant'\n else:\n adjective_choice = self.random_element(adjective_choices)\n\n return (\n self.technobabble['verbs'],\n article_choice,\n self.technobabble[adjective_choice],\n self.technobabble[noun_choice])", "def load_verbs(self, verbs: List[str]) -> List[str]:\n\n neg_verbs = get_verbs(verbs)\n\n neg_verbs_preceding = neg_verbs.loc[\n ((neg_verbs[\"mode\"] == \"Indicatif\") & (neg_verbs[\"tense\"] == \"Présent\"))\n | (neg_verbs[\"tense\"] == \"Participe Présent\")\n | (neg_verbs[\"tense\"] == \"Participe Passé\")\n | (neg_verbs[\"tense\"] == \"Infinitif Présent\")\n ]\n neg_verbs_following = neg_verbs.loc[neg_verbs[\"tense\"] == \"Participe Passé\"]\n list_neg_verbs_preceding = list(neg_verbs_preceding[\"term\"].unique())\n list_neg_verbs_following = list(neg_verbs_following[\"term\"].unique())\n\n return (list_neg_verbs_preceding, list_neg_verbs_following)", "def getEssentialList(self):\n return self.essentials", "def card_fields_in_order(self) -> List[str]:\n card_in_anki_order = [self.word, self.pronunciation, self.sentence,\n self.definitions, self.book_title, self.author]\n return card_in_anki_order", "def DumpDetails(self, sentences, label=\"N.A.\"):\n AdjR = 0.0\n adjAll 
= []\n for sentence in sentences:\n # if sentence[\"Text\"].startswith(\"Joanie is not helpful\"):\n # x = 1\n adjectives, dependencies = self.ExtractSentDetails(sentence)\n adjAll.extend(adjectives)\n allAdjectives = adjectives | Angel.GlobalAdjList\n AdjS = 0.0\n words = wordpunct_tokenize(sentence[\"Text\"])\n if len(words) <= 3:\n allAdjectives |= set([x.lower() for x in words])\n for i in range(len(words)):\n word = words[i].lower()\n if word in {\"but\", \"if\"}:\n AdjS = 0.0\n print words[i],\n elif word in allAdjectives and word in self.lexicon:\n multiplier = self.PredictMultiplier(word, dependencies[word], words, i)\n score = float(self.lexicon[word]) * multiplier\n if multiplier < 1:\n colortext = colored(words[i] + \" (\" + '{:.3}'.format(score) + \")\", 'red',None,['underline'])\n elif multiplier > 1:\n colortext = colored(words[i] + \" (\" + '{:.3}'.format(score) + \")\", 'red',None,['bold'])\n else:\n colortext = colored(words[i] + \" (\" + '{:.3}'.format(score) + \")\", 'red')\n AdjS += score\n print colortext,\n else:\n print words[i],\n print\n colortext = colored(\"Adjectives: \" + '{:.3}'.format(AdjS),'red')\n print colortext\n AdjR += AdjS\n print\n print \"Label:\", label\n base = self.PredictBase(adjAll)\n colortext = colored(\"Adjectives: \" + str(AdjR) + \"*\" + str(base) + \" = \" + str(AdjR*base),'red')\n print colortext", "def add_adjectives(self, adjective):\n self.adjectives += [adjective]", "def random_advice(message):\n advice = requests.get(\"https://api.adviceslip.com/advice\").json()['slip']['advice']\n\n return advice", "def generate_products():\n # initialize list of noun and adj\n num_products = 30\n products = [0] * num_products\n prices = [0] * num_products\n weights = [0] * num_products\n flammabilities = [0] * num_products\n\n # initlize random word object\n random = RandomWords()\n\n adj = [random.get_random_word(includePartOfSpeech=\"adjective\")\n for product in products]\n noun = [random.get_random_word(includePartOfSpeech=\"noun\")\n for product in products]\n products = [noun + \" \" + adj for noun, adj in zip(adj, noun)]\n\n prices = [random.randint(5, 100) for price in prices]\n weights = [random.randint(5, 100) for weight in weights]\n flammabilities = [random.randint(0.0, 2.5)\n for flammability in flammabilities]\n\n return products, prices, weights, flammabilities", "def setup_random_opinions_representatives():\r\n global CATEGORIES\r\n \r\n ideas_dic = {}\r\n \r\n for i in CATEGORIES:\r\n #idea = Idea(1,\"\",i, 1-random.expovariate(6))\r\n if i == 1:\r\n idea = Idea(1,\"\",i, random.uniform(-1,-0.5))\r\n elif i == 2:\r\n idea = Idea(1,\"\",i, random.uniform(-1,-0.5))\r\n elif i == 3:\r\n idea = Idea(1,\"\",i, random.uniform(0.5,1))\r\n \r\n# idea = Idea(1,\"\",i, random.uniform(0.5,1))\r\n ideas_dic[i] = idea\r\n \r\n return ideas_dic", "def getAchievements(self) -> list:\n return self.state[ACHIEVEMENTS]", "def darts(self):\r\n return self.alphas[0].keys()", "def add_adjectives(self, *sAdjs):\n self.adjectives += list(sAdjs)", "def skills():\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]", "def effect_list(self):\n moods = []\n for mood in self._moodlist:\n if \"name\" in mood:\n moods.append(mood['name'])\n return moods", "def get_ideas(self):\n fields = ['content', 'clusterIDs', 'isGamechanger',\n 'userID', 'promptID']\n return self.get_data(\"ideas\", 
fields)", "def get_list(self):\n categories = []\n for attribut in self.attributes:\n attr = getattr(self, attribut, False)\n if attr is True:\n categories.append(attribut)\n if getattr(self, 'education') is True:\n categories.append(_(u'education'))\n if getattr(self, 'training') is True:\n categories.append(_(u'training'))\n if getattr(self, 'tutoring') is True:\n categories.append(_(u'tutoring'))\n\n return categories", "def p_banner():\n return random.choice([banner, banner_two, banner_three, banner_four, banner_five])", "def give_lists(self):\r\n hard = []\r\n easy = []\r\n medium = []\r\n for question in self.get_all():\r\n if question.difficulty_level == \"easy\":\r\n easy.append(question)\r\n elif question.difficulty_level == \"medium\":\r\n medium.append(question)\r\n else:\r\n hard.append(question)\r\n return easy,medium,hard", "def shuffled_answers(self):\n if self.answers is None:\n return []\n answers = list(self.answers)\n shuffle(answers)\n return answers", "def _generateDescription(self, obj, **args):\n result = []\n if obj.description:\n label = self._script.utilities.displayedLabel(obj) or \"\"\n name = obj.name or \"\"\n desc = obj.description.lower()\n if not (desc in name.lower() or desc in label.lower()):\n result.append(obj.description)\n return result", "def get_dialogue_acts(self):\n return self.DAs", "def variation_descriptors(civic_vid33):\n return [civic_vid33]", "def getArrettes(self) -> list:\n return self._arrettes", "def get_describable_list(request):\n describables = []\n\n from django.apps import apps\n for entity in apps.get_app_config('descriptor').describable_entities:\n content_type = get_object_or_404(\n ContentType, app_label=entity._meta.app_label, model=entity._meta.model_name)\n\n describables.append({\n 'id': content_type.pk,\n 'value': \"%s.%s\" % (entity._meta.app_label, entity._meta.model_name),\n 'label': str(entity._meta.verbose_name.capitalize())\n })\n\n return HttpResponseRest(request, describables)", "def get_description():\r\n return{\"I'll never yield!\":\"Grants a shield.\",\r\n \"Stay still!\":\"Affected unit cannot act in their turn.\"\r\n }", "def single_introduction(end):\n return [random.randint(0,end)]", "def description_ques(analysis):\n if analysis.sv[0].vrb_tense.startswith('present'):\n analysis.sv[0].vrb_tense = 'present progressive'\n if analysis.sv[0].vrb_tense.startswith('past'):\n analysis.sv[0].vrb_tense = 'present progressive'\n sentence = y_o_question(analysis)\n for i in sentence:\n if i == 'liking':\n sentence[sentence.index(i)] = 'like'\n return ['what'] + sentence", "def get_paragraphs():\n soup = get_html()\n paragraphs = []\n for i in soup.findAll('div', {'class': 'faq-list1__hide'}):\n p = str(i.get_text().strip())\n paragraphs.append(p)\n return paragraphs", "def _sample_episode(self):\n\n Knovel = random.sample(self.labelIds, self.nKnovel)\n #print(Knovel)\n #exit(0)\n nKnovel = len(Knovel)\n assert((self.nTestNovel % nKnovel) == 0)\n nEvalExamplesPerClass = int(self.nTestNovel / nKnovel)\n #print(nEvalExamplesPerClass)\n #exit(0)\n Tnovel = []\n Exemplars = []\n for Knovel_idx in range(len(Knovel)):\n ids = (nEvalExamplesPerClass + self.nExemplars)\n img_ids = random.sample(self.labels2inds[Knovel[Knovel_idx]], ids) \n\n imgs_tnovel = img_ids[:nEvalExamplesPerClass]\n imgs_emeplars = img_ids[nEvalExamplesPerClass:]\n #print(imgs_tnovel)\n #exit(0)\n Tnovel += [(img_id, Knovel_idx) for img_id in imgs_tnovel]\n Exemplars += [(img_id, Knovel_idx) for img_id in imgs_emeplars]\n assert(len(Tnovel) == 
self.nTestNovel)\n assert(len(Exemplars) == nKnovel * self.nExemplars)\n random.shuffle(Exemplars)\n random.shuffle(Tnovel)\n\n return Tnovel, Exemplars", "def getAllFirstDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.append(worldItems[item][DESCWORDS][0])\r\n return list(set(descWords))", "def get_techniques(db):\n\n techniques = []\n for element in db:\n for technique in element['techniques_used']:\n if technique not in techniques:\n techniques.append(technique)\n \n return sorted(techniques)", "def animals(self):\n return self.herbivores + self.carnivores", "def buscaPalavras(self):\n dataSet=self.stemmerAplay()\n todasPalavras =[]\n for (notice, clazz) in dataSet:\n todasPalavras.extend(notice)\n return todasPalavras", "def get_feature_names(self):\n\t\treturn np.array(['nouns', 'adjectives', 'verbs', 'adverbs'])", "def getListOfObjectives(self, *args):\n return _libsbml.FbcModelPlugin_getListOfObjectives(self, *args)", "def get_verb_list(lang: Lang) -> VerbsList:\n with open(f'languages/{lang}/verbs.csv', newline='') as csv_file:\n return list(reversed([VerbInf(word[0]) for word in csv.reader(csv_file, delimiter=',')]))", "def diamonds(self):\n return sorted(tuple([v for v in self if v.suit == 'diamonds']), reverse=True)", "def get_hero_list(self):\n out_list = []\n for key, _ in self._heroes.items():\n out_list.append(key)\n out_list.sort()\n return out_list", "def getIngredients():\n ingredients = ['Whiskey', 'Tequila', 'Vodka', 'Blue Curacao', 'Orange Juice',\n 'Pineapple Juice', 'Cranberry Juice', 'Sour Mix']\n return ingredients", "def list(self, frame=0):\n text = []\n if not self.number_of_variations:\n return \"\"\n for group_number in range(1, len(self.varexercise_numbers)+1):\n text.extend(\n self.group_list(group_number))\n return text", "def listPalettes(show=True, getdescriptions=True):\n\n # Print palette names to terminal if that's what the user wants\n if show:\n print \"Currently available palettes:\"\n \n for palette_name in sorted(palettes.keys()):\n print palette_name,\n if not getdescriptions:\n print\n else:\n if ((not (palette_name in descriptions)) \n or (descriptions[palette_name] == \"\")):\n print ((\": Palette %s does not have a description, \"\n \"please write one and add it to the descriptions dict \"\n \"in palettes.py.\") % palette_name)\n continue\n # end if\n print \": %s\" % descriptions[palette_name]\n # end if\n # end for\n # end if\n \n if not getdescriptions:\n return palettes.keys()\n else:\n return (palettes.keys(), descriptions)\n # end if", "def get_to_review(self) -> VerbsList:\n to_review = [verb for verb, stats in self.practice_list.items() if pick(stats)]\n\n if len(to_review) < self.min_to_review:\n for _ in range(self.min_to_review - len(to_review)):\n to_review.append(self.to_learn_list.pop())\n\n return to_review", "def Obtener_Lista(self):\n\t\treturn [self,self.Nombre+\" \"+self.Apellido,self.ID,self.Fecha, \n\t\tself.Edad,self.Test,self.Posicion,self.Prioridad,self.Progreso,self.Informe]", "def random_data(self) -> (str, str):\n random_animal = random.choice(self.animals_list)\n synsets = wn.synsets(str(random_animal))\n definition = \"\"\n while True:\n if len(synsets) != 0:\n for synset in synsets:\n if synset.lexname() == 'noun.animal':\n definition = synset.definition()\n break\n else:\n random_animal = random.choice(self.animals_list)\n synsets = wn.synsets(str(random_animal))\n return random_animal, definition", "def hp_descartes(self, 
hyper_params=None):\n if hyper_params is None: hyper_params = list(self.hyper_params.values())\n if len(hyper_params) == 0: yield OrderedDict()\n else:\n first_hp = hyper_params[0]\n assert isinstance(first_hp, CategoricalHP)\n for val in first_hp.choices:\n configs = OrderedDict()\n configs[first_hp.name] = val\n for cfg_dict in self.hp_descartes(hyper_params[1:]):\n configs.update(cfg_dict)\n yield configs", "def get_samples(meta, classes_list):\n\n h = list(meta[meta['DiseaseState'].isin(classes_list[0])].index)\n dis = list(meta[meta['DiseaseState'].isin(classes_list[1])].index)\n\n return [h, dis]", "def get_des(tree_before: Node, tree_after: Node) -> List[Description]:\n\n des_gen: Iterator[Description] = get_des_gen(tree_before, tree_after)\n\n return list(des_gen)", "def apertures(self):\n return self._apertures", "def get_details(disease):\n\treturn d_desc_map[disease]", "def list(self):\n\t\treturn self.link_words", "def get_introduction(length=128, words=None):", "def getaslist(self):\n l = []\n l.append(self.title.strip().encode('utf-8'))\n l.append(self.desc.strip().encode('utf-8'));\n l.append(self.course_number.strip().encode('utf-8'))\n l.append(self.duration.strip().encode('utf-8'))\n l.append(self.difficulty.strip().encode('utf-8'))\n l.append(self.instructors.strip().encode('utf-8'))\n l.append(self.url.strip().encode('utf-8'))\n return l", "def show_pairs(self):\n\n all_pairs = []\n for plug in self.plugleads:\n all_pairs.append(plug.pair)\n return all_pairs", "def get_verbs(self) -> Set[str]:", "def get_attributes(cls):\r\n return [\r\n Attribute(\"show_correctness\", \"always\"),\r\n Attribute(\"submitted_message\", \"Answer received.\"),\r\n Attribute(\"label\", \"\"),\r\n ]", "def make_ad(state):\n return [adnumber(val) for val in state]", "def list(self):\n a0 = map(self._alphabet.unrank, self._labels[0])\n a1 = map(self._alphabet.unrank, self._labels[1])\n return [a0, a1]", "def sample_effects(self) -> List[List[str]]:\n return [\n [\n str(item.sample() if isinstance(item, RandomValue) else item)\n for item in effect\n ]\n for effect in self.effects\n ]", "def desc (self):\n uni = {\n \"name\": self.name,\n \"G\": self.G,\n \"time\": self.time,\n \"bodies\": {bid: body.desc() for bid, body in self.bodies.iteritems()},\n \"description\": self.description,\n \"datatime\": self.datatime\n }\n\n return uni", "def create_samples(descriptions, candidates_lists, labels):\n samples = []\n for description, candidate_list, label in tqdm(\n zip(descriptions, candidates_lists, labels)\n ):\n negative_examples = [\n candidate for candidate in candidate_list if candidate != label\n ]\n negative_examples = random.choices(negative_examples, k=4)\n if label in candidate_list:\n positive_example = [\n candidate for candidate in candidate_list if candidate == label\n ][0]\n else:\n positive_example = label\n samples.append(InputExample(texts=[description, positive_example], label=1))\n for neg_ex in negative_examples:\n samples.append(InputExample(texts=[description, neg_ex], label=0))\n return samples", "def list(cls):\n\n forges = cls.forges()\n\n return {\"forges\": [{\"id\": id, \"description\": forges[id]} for id in sorted(forges.keys())]}", "def get_alignments(self) -> list:", "def return_augmented_sentences(self) -> list:\n return self.augmented_sentence_list", "def describe(self):\n if not self.name:\n raise ValueError(\"Sorry! 
id_type must be 'name'\")\n r = requests.get(f\"https://api.fda.gov/drug/ndc.json?search=brand_name:{self.drug_id}\")\n response = r.json()\n data = response['results'][0]\n self.brand_name = data['brand_name']\n self.generic_name = data['generic_name']\n self.active_ingredients = [i['name'] for i in data['active_ingredients']]\n self.pharm_class = get_pharm_class(self.drug_id, as_df=False)\n self.route = data['route']\n self.ndc = data['product_ndc']\n self.product_type = data['product_type']\n\n print(f\"Generic name: {self.generic_name}\")\n print(f\"Brand name: {self.brand_name}\")\n print(f\"Active ingredients: {self.active_ingredients}\")\n print(f\"Routes of administration: {self.route}\")\n print(f\"Pharmacologic Classes: {self.pharm_class}\")\n print(f\"NDC: {self.ndc}\")\n print(f\"Product type: {self.product_type}\")", "def get_vo_descriptions(vos):\n all_vos = vo_core.list_vos()\n vos_with_desc = []\n for vo in all_vos:\n if vo['vo'] in vos:\n vos_with_desc.append((vo['vo'], vo['description']))\n return vos_with_desc", "def therapy_descriptors(civic_tid146):\n return [civic_tid146]", "def get_antags(self):\n antags = []\n for obj in self.antagobjs.group_by(AntagObjective.mindkey):\n antag = {'key': obj.mindkey, 'name': obj.mindname, 'role': obj.special_role}\n antags.append(antag)\n return antags", "def sample_apr(self):\n FLAGS.full_wiki = True\n FLAGS.apr_dir = 'Directory Name'\n apr = apr_lib.ApproximatePageRank()\n seeds = [\n 'Q7755', 'Q878070', 'Q428148', 'Q679847', 'Q2609670', 'Q174834',\n 'Q188628'\n ]\n unique_facts = apr.get_facts(\n seeds, topk=200, alpha=0.9, seed_weighting=True)\n facts = sorted(unique_facts, key=lambda tup: tup[1][1], reverse=True)\n nl_facts = ' . '.join([\n str(x[0][0][1]) + ' ' + str(x[1][0][1]) + ' ' + str(x[0][1][1])\n for x in facts\n ])\n tf.logging.info('Extracted facts: %s', nl_facts)", "def get_pledge_list():\n with open('pledge.txt') as f:\n pledge_list = f.read().split()\n\n #Eliminate punctuation from the end of words\n pledge_list_noPunct = [word if word[-1] in ascii_letters else word[:-1] for word in pledge_list]\n\n return pledge_list_noPunct", "def _process_examples(self):\n logger = getLogger(\"problog_lfi\")\n # value can be True / False / None\n # ( atom ), ( ( value, ... ), ... )\n\n # Simple implementation: don't add neutral evidence.\n\n # ad_groups is a list of lists where each list contains an AD\n ad_groups = list()\n for ad in self._adatoms:\n # if it's an AD group\n if len(ad[1]) > 1:\n ad_list = []\n for var in ad[1]:\n ad_list.append(Term(self.names[var].functor, *self.names[var].args))\n ad_groups.append(tuple(ad_list))\n logger.debug(\"AD Groups\\t\\t:\" + str(ad_groups))\n\n def all_false(d):\n \"\"\"\n This function recognizes inconsistent evidence s.t. 
all values are False in AD.\n :param d: dictionary of ADs in form {term: value}\n value can be True, False, None, \"Template\"\n :return: whether all values are False\n \"\"\"\n # false_count should be the same as the length of d\n false_count = sum(v is False for v in d.values())\n return false_count == len(d)\n\n def all_false_except_one(d):\n \"\"\"\n This function recognizes incomplete evidence s.t.\n the non-False value in AD needs to be set to True.\n :param d: dictionary of ADs in form {term: value}\n value can be True, False, None, \"Template\"\n :return: whether all values except one are False\n \"\"\"\n false_count = sum(v is False for v in d.values())\n the_left_is_none = bool(sum(v is None for v in d.values()))\n return (false_count == len(d) - 1) and the_left_is_none\n\n def getADtemplate(d, atom=None):\n \"\"\"\n This function gets atom's complement AD template.\n This should only be used when the AD contains non-ground terms.\n :param d: dictionary of ADs in form {term: value}\n value can be True, False, None, \"Template\"\n :param atom: an evidence\n :return: atom's complement AD template\n \"\"\"\n if atom is not None:\n temp_dict = {\n k: v\n for k, v in d.items()\n if v == \"Template\" and atom.signature != k.signature\n }\n return temp_dict\n else:\n temp_dict = {k: v for k, v in d.items() if v == \"Template\"}\n return temp_dict\n\n def add_to_ad_evidence(pair, l, ADtemplate):\n \"\"\"\n :param pair: a new pair of (atom, value)\n :param l: a list of dictionaries, all dictionaries need to have the same format\n :return:\n \"\"\"\n (k, v) = pair\n # if entry k exists, update the value with k\n for d in l:\n if k in d:\n d[k] = v\n return\n # if entry k does not exist, create a new dictionary from template\n # and instantiate it with k\n new_d = dict()\n for temp_k in ADtemplate.keys():\n new_key = Term(temp_k.functor, *k.args)\n new_d[new_key] = None\n # put v in there\n new_d[k] = v\n l.append(new_d)\n\n if self.infer_AD_values:\n result = ExampleSet()\n inconsistent = False\n # iterate over all examples given in .ev\n for index, example in enumerate(self.examples):\n ad_evidences = []\n non_ad_evidence = {}\n for ad_group in ad_groups:\n # create a dictionary to memorize what evidence is given in AD\n d = dict()\n # TODO: what if the AD contains both ground and non-ground????\n # e.g. 
t(_)::a; t(_)::b(X)\n for var in ad_group:\n if var.is_ground():\n d[var] = None # for ground unknown evidence\n else:\n d[var] = \"Template\" # for unground unknown evidence\n ad_evidences.append(d)\n\n # add all evidence in the example to ad_evidences\n for atom, value in example:\n # if atom has a tunable probability to learn\n if any([atom.signature == name.signature for name in self.names]):\n if len(atom.args) == 0: # Propositional evidence\n # insert evidence\n is_ad = False\n for d in ad_evidences:\n if atom in d:\n d[atom] = value\n is_ad = True\n if not is_ad:\n non_ad_evidence[\n atom\n ] = value\n else: # First Order evidence\n # find the right AD dictionary : AD_dict\n AD_dict = None\n for d in ad_evidences:\n if any([atom.signature == k.signature for k in d]):\n AD_dict = d\n # if the instantiation is new, add it as a key to the dictionary\n if AD_dict and AD_dict.get(atom) is None:\n AD_dict[atom] = value\n # also add other AD parts in the dictionary with value==None\n other_ADs = getADtemplate(AD_dict, atom)\n for otherAD in other_ADs.keys():\n new_key = Term(otherAD.functor, *atom.args)\n AD_dict[new_key] = AD_dict.get(new_key, None)\n else:\n non_ad_evidence[atom] = value\n else:\n non_ad_evidence[atom] = value\n\n # grounded_ad_evidences contains all usable evidence (gound, not template)\n grounded_ad_evidences = []\n for d in ad_evidences:\n # for first order evidence dictionaries\n if \"Template\" in d.values():\n # new_ad_evidence is a list of dictionaries\n # each dictionary is a group of the AD template instantiation\n new_ad_evidence = list()\n # get template AD evidence\n ADtemplate = getADtemplate(d)\n # group all pairs according to ADtemplate\n for k, v in d.items():\n if v != \"Template\":\n add_to_ad_evidence((k, v), new_ad_evidence, ADtemplate)\n grounded_ad_evidences += new_ad_evidence\n else:\n # simply us them\n grounded_ad_evidences.append(d)\n\n inconsistent_example = False\n for i, d in enumerate(grounded_ad_evidences):\n # inconsistent1 = multiple_true(d)\n inconsistent2 = all_false(d)\n add_compliment = all_false_except_one(d)\n\n if inconsistent2:\n inconsistent_example = True\n continue\n elif add_compliment:\n for key, value in d.items():\n if value is None:\n grounded_ad_evidences[i][key] = True\n\n if not inconsistent_example and len(grounded_ad_evidences) > 0:\n # There are (fully tunable) ADs in the program\n evidence_list = []\n for d in grounded_ad_evidences:\n for key, value in d.items():\n if value is not None:\n if (key, value) not in evidence_list:\n # TODO: Switch to orderedSet so we don't have to check whether it's in already\n evidence_list.append((key, value))\n\n for key, value in non_ad_evidence.items():\n evidence_list.append((key, value))\n\n atoms, values = zip(*evidence_list)\n result.add(index, atoms, values)\n\n else:\n # (No AD case) or (Inconsistent Evidence Case)\n atoms, values = zip(*example)\n result.add(index, atoms, values)\n return result\n else:\n # smarter: compile-once all examples with same atoms\n result = ExampleSet()\n for index, example in enumerate(self.examples):\n atoms, values = zip(*example)\n result.add(index, atoms, values)\n return result", "def ad_meta(self, d):\n names = []\n if is_str(d):\n aname = d[:-2] + 'name'\n if aname in self.ad_cols:\n names = [aname]\n else:\n names = [n[:-2] + 'name' for n in d if n[:-2] + 'name' in self.ad_cols]\n return self.configs['meta_ad'] + names", "def get_assay_solutions(self):\n vocabs = []\n assay = self.get_assay()\n if not assay:\n return vocabs\n for 
solution_type_name in assay.needed_solutions:\n type_batches = find(Type=solution_type_name,\n expires={'query': datetime.today().date(),\n 'range': 'min'},\n sort_on='expires')\n\n tmp = []\n for batch in type_batches:\n tmp.append([batch.id,\n batch.Title,\n batch.expires.strftime('%Y-%m-%d')])\n vocabs.append([solution_type_name, tmp])\n return vocabs", "def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"definition\",\n \"doseForm\",\n \"intendedRoute\",\n \"ingredient\",\n \"drugCharacteristic\",\n ]", "def descents(self, index_set=None, positive=False):\n if index_set==None:\n index_set=self.parent().index_set()\n return [ i for i in index_set if self.has_descent(i, positive) ]", "def getDescriptors(self, dsc = \"\"):\r\n return \"\"", "def get_descuentos(self):\n return float(\n self.input.get_text(liquidaciones_historicas_catalog.DESCUENTOS).replace(\".\", \"\").replace(\",\", \".\"))", "def desc(self) -> List[str]:\n format_data: dict = {\n key: val\n for key, val in self.__dict__.items()\n if (\n not key.startswith(\"_\") and key != \"desc\" and not callable(val)\n )\n }\n format_data.update(self._kwargs)\n ret: List[str] = []\n for descr in self._desc:\n try:\n descr = descr % format_data\n except Exception as kerr:\n raise PyParamException(\n f\"Description of {self.namestr()!r} is formatting \"\n \"using kwargs from contructor. \\n\"\n f\"- desc: {descr}\\n\"\n f\"- key : {{... {str(kerr)[1:-1]} ...}}\"\n ) from None\n else:\n ret.append(descr)\n return ret", "def prepare_text_data(descriptions):\n text_data = []\n for line in descriptions:\n tokens = prepare_text_for_lda(line)\n text_data.append(tokens)\n return text_data", "def collection(self):\n questions = []\n choice_list = []\n answers = []\n\n if self.form=='The correct German word':\n for i in range(self.num_ques):\n question, options, answer = self.generate_eng2ger()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n else:\n for i in range(self.num_ques):\n question, options, answer = self.generate_ger2eng()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n\n return questions, choice_list, answers", "def apertures(self):\n return tuple(a for a in self._apertures if a is not None)", "def _getChoices(self, acronym):\n # get matches from acronymDB\n matches = []\n if(acronym in self.acronymDB):\n matches += self.acronymDB[acronym]\n if(acronym[-1] == \"s\" and acronym[:-1] in self.acronymDB):\n matches += self.acronymDB[acronym]\n\n # create training data\n X_train, y_train = [], []\n for definition, articleID, ignored_var in matches:\n text = self.articleDB[articleID]\n X_train.append(\n ExpansionChoice(article_id=articleID, article_text=text))\n y_train.append(definition)\n\n # create y labels to group similar acronyms\n y_labels, labelToExpansion = self._processChoices(y_train)\n\n return X_train, y_labels, labelToExpansion", "def PredictBase(self, adjectives):\n # Get the list of Adjectives which have sentiment polarity greater than 0.1\n if self.smallReviews:\n return 1.0\n PolarAdjList = [l for l in adjectives if l in self.lexicon and math.fabs(float(self.lexicon[l])) > 0.1]\n if len(PolarAdjList) > 0:\n return 12.0/len(PolarAdjList)\n # elif len(list2) < 8:\n # return 2.0\n else:\n return 1.0", "def get_description(self):", "def ExtractSentDetails(self, sentence):\n if \"Adjectives\" in sentence:\n adjList = [w.lower() for w in sentence[\"Adjectives\"] if w.lower() not in Angel.stopWords and w.lower() 
not in Angel.engNames]\n adjectives = set(adjList)\n else:\n adjectives = set()\n dependencies = defaultdict(dict)\n if \"Dependencies\" in sentence:\n if not isinstance(sentence[\"Dependencies\"],list):\n sentence[\"Dependencies\"] = [sentence[\"Dependencies\"]]\n for dep in sentence[\"Dependencies\"]:\n line = dep.split(',')\n if len(line) != 3:\n continue\n relation, adj, other = line\n adj, other = adj.lower(), other.lower()\n if relation in {'amod', 'acomp', 'ccomp', 'pobj', 'dep'}:\n adj, other = other, adj\n if relation not in dependencies[adj]:\n dependencies[adj][relation] = set()\n dependencies[adj][relation].add(other)\n if relation == 'conj':\n adjectives.add(other)\n dictconj, other = defaultdict(dict), None\n for adj in dependencies:\n if 'conj' in dependencies[adj]:\n for other in dependencies[adj]['conj']:\n dictconj[other] = copy.deepcopy(dependencies[adj])\n for adj in dictconj:\n for relation in dictconj[adj]:\n if relation not in dependencies[adj]:\n dependencies[adj][relation] = set()\n dependencies[adj][relation] |= dictconj[adj][relation]\n return adjectives, dependencies", "def _get_appdesc_embeddings(self, features, training):\n # Using GloVe, embed each token then aggregate them.\n appdesc_embeddings = self._word_embedding_layer(\n features['appdesc_token_id'])\n appdesc_embeddings = self._aggregate_text_embedding(\n features['appdesc_token_id'], appdesc_embeddings)\n\n if training:\n appdesc_embeddings = tf.nn.dropout(\n appdesc_embeddings, rate=self._hparams['dropout'])\n\n return appdesc_embeddings", "def load_description():\n with open('description.txt') as description:\n return [line.strip() for line in description]", "def to_adverb(self):\n return self" ]
[ "0.605831", "0.5822134", "0.57419574", "0.573242", "0.55850464", "0.5502515", "0.5492401", "0.5489824", "0.5483387", "0.5447286", "0.54260534", "0.5316058", "0.52798134", "0.5279397", "0.5267892", "0.5265623", "0.525828", "0.52477735", "0.5244685", "0.5200043", "0.51937425", "0.51871073", "0.51700115", "0.5165577", "0.51619357", "0.5148933", "0.5103721", "0.5102739", "0.5100347", "0.5086335", "0.50788647", "0.50779635", "0.50728434", "0.50336117", "0.50302374", "0.5019083", "0.5006002", "0.5004376", "0.4975105", "0.49738634", "0.49696755", "0.4968562", "0.496571", "0.49637082", "0.49592713", "0.492924", "0.4923535", "0.49155068", "0.4911112", "0.4907792", "0.4878104", "0.48605114", "0.4850821", "0.4838524", "0.4831847", "0.48165113", "0.48138934", "0.48116457", "0.47989595", "0.47848463", "0.47799423", "0.47780952", "0.4775788", "0.47711614", "0.4767932", "0.47675723", "0.4766291", "0.47629923", "0.4759205", "0.4755667", "0.47549325", "0.47523794", "0.47502387", "0.474666", "0.47450402", "0.4741852", "0.47403827", "0.47392106", "0.4737233", "0.47322586", "0.4723873", "0.47166508", "0.4715262", "0.4712803", "0.4711702", "0.47082305", "0.4706386", "0.47063842", "0.47056648", "0.47053358", "0.470437", "0.47019207", "0.47002774", "0.46991658", "0.4698273", "0.46982226", "0.469817", "0.46952024", "0.46864656", "0.4686065" ]
0.81158966
0
Returns the noun, including all its describing adjectives, as a string.
def full_string(self):
    return "{}: {}".format(str(self.word), " ".join([str(adj) for adj in self.adjectives]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def en_noun(t, label):\r\n s = label\r\n p = []\r\n is_uncountable = False\r\n \r\n # http://en.wiktionary.org/wiki/Template:en-noun\r\n head = t.arg(\"head\", label)\r\n p1 = t.arg(0)\r\n p2 = t.arg(1)\r\n \r\n if p1 == \"-\":\r\n # uncountable\r\n is_uncountable = True\r\n \r\n if p2 == \"s\":\r\n # ends by s\r\n p.append(head + \"s\")\r\n \r\n elif p2 is not None:\r\n # word\r\n p.append(p2)\r\n \r\n elif p1 == \"es\":\r\n # add es\r\n p.append(head + \"es\")\r\n \r\n elif p1 is not None:\r\n # use term\r\n p.append(p1)\r\n \r\n elif p1 is None and p2 is None:\r\n p.append(head+\"s\")\r\n\r\n for k,a in t.args.items():\r\n if not a.is_named():\r\n if k == 0 or k == 1:\r\n continue\r\n \r\n p.append(a.as_string())\r\n \r\n return (s, p, is_uncountable)", "def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())", "def to_adverb(self):\n\n if 'AdverbPhrase' in self.variants:\n return self.variants['AdverbPhrase']\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=\"with \" + self.text)", "def noun_string(data_org):\n chains = []\n tokens = word_tokenize(data_org)\n #tokenize to prepare for tagging\n w_tag = dict(nltk.pos_tag(tokens))\n chain = []\n for w, tag in w_tag.items():\n #find all nouns based on treebank format\n if tag.startswith('N'):\n chain.append(w)\n else:\n if len(chain) >= 3:\n chains.append(\" \".join(chain))\n chain = []\n\n #move information to dataframe for printing to excel\n df_noun_string = pd.DataFrame({'Noun Strings (3+ Nouns in a row)': chains}, columns = ['Noun Strings (3+ Nouns in a row)'])\n return df_noun_string", "def __str__(self):\n out = self.san\n if self.comment != \"\":\n out += \" {\" + self.comment.replace('\\n', ' ') + \"}\"\n if len(self.nags) > 0:\n for n in self.nags:\n out += \" \" + n\n for v in self.variations:\n out += \" (\" + str(v).strip(' ') + \")\"\n return out", "def get_compound_noun( ngram ):\n try:\n pattern = re.compile( '((?: ?\\\\b[^\\\\s]+(?:/NN.?/[a-z]+/[\\\\d]+)){2,})' )\n match = re.search( pattern, ngram )\n if match:\n compound = ''\n contains_root = False\n tokens = match.group().strip().split(' ')\n for t in tokens:\n # embed()\n items = t.split('/')\n compound += ( items[0] + ' ' )\n if items[3] == 0:\n contains_root = True\n if contains_root:\n return compound\n else:\n return None\n else:\n return None\n \n except ValueError:\n return None", "def get_article_str(article_sents):\n article_str = \"\"\n for nlp_sent in article_sents:\n article_str += (' ' + nlp_sent.text + ' ')\n return article_str", "def get_noun_phrases(blob):\n return blob.noun_phrases", "def to_adverb(self):\n\n if 'AdverbPhrase' in self.variants:\n return self.variants['AdverbPhrase']\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=self.text + \" to\")", "def indefinite(self):\n return \"an\" if self.short_desc[0] in 'aeiou' else \"a\"", "def noun_lemma(word):\n if word.endswith(\"s\"):\n if word.endswith(\"ss\"):\n return word.lower()\n elif word.endswith(\"ies\"):\n return word[:-3].lower() + (\"y\")\n else:\n return word[:-1].lower()\n if word.endswith(\"men\"):\n return word[:-2].lower() + (\"an\")\n else:\n return word.lower()", "def to_adverb(self):\n\n text = self.text\n ending = text[-1]\n if ending == \"e\":\n text = text[0:-1]+\"ly\"\n else:\n text = text+\"ly\"\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=text)\n\n # return AdverbPhrase(**self.locals(skip=[\"text\", \"typ\", \"variants\"]),\n # 
text=text,\n # **self.variants)", "def a(noun):\n if p.singular_noun(noun) is not False:\n return noun\n else:\n return p.a(noun)", "def get_abbreviated_description(self):\n word_array = str(self.description).split()[:25]\n abbreviated_description = \" \".join(word_array)\n return abbreviated_description", "def __str__(self):\n return self.underscoreSentence.get().__str__()", "def get_sentence(self):", "def short_description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"short_description\")", "def greco_latin_plural_noun(base_token=None):\n\n output_string = \"\"\n if base_token is not None:\n if base_token.endswith(\"us\"):\n output_string = base_token[:-2] + \"i\"\n elif base_token.endswith(\"ma\"):\n output_string = base_token + \"ta\"\n elif base_token.endswith(\"a\"):\n output_string = base_token[:-1] + \"ae\"\n elif base_token.endswith((\"on\", \"um\")):\n output_string = base_token[:-2] + \"a\"\n elif base_token.endswith(\"sis\"):\n output_string = base_token[:-3] + \"ses\"\n elif base_token.endswith(\"is\"):\n output_string = base_token[:-2] + \"ides\"\n elif base_token.endswith(\"men\"):\n output_string = base_token[:-3] + \"mina\"\n elif base_token.endswith(\"ex\"):\n output_string = base_token[:-2] + \"ices\"\n elif base_token.endswith(\"x\"):\n output_string = base_token[:-1] + \"ces\"\n\n return output_string", "def to_string(self):\n return \" \".join(self._words)", "def __repr__(self):\r\n s = 'Words:\\n' + str(self.words) + '\\n\\n'\r\n s += 'Word lengths:\\n' + str(self.wordlengths) + '\\n\\n'\r\n s += 'Stems:\\n' + str(self.stems) + '\\n\\n'\r\n s += 'Sentence lengths:\\n' + str(self.sentencelengths) + '\\n\\n'\r\n s += 'Gerunds:\\n' + str(self.gerund)\r\n return s", "def _get_sentence(sentence_data):\n return \" \".join([word for word, ne_tag in sentence_data])", "def dish_str(n:Dish):\r\n return (n.name + \" $\" + str(n.price) + \" \" + str(n.calories) + \" cal\")", "def get_description(self):\n return \"-\".join(\n map(str, (self.release, self.chromosome, self.start, self.reference, self.alternative))\n )", "def find_noun(sent):\n noun = None\n\n if not noun:\n for w, p in sent.pos_tags:\n if p == 'NN': # This is a noun\n noun = w\n break\n if noun:\n #logger.info(\"Found noun: %s\", noun)\n pprint(\"FOUND NOUN\")\n pprint(noun)\n\n return noun", "def NoDeltaGExplanation(self):\n for compound in self.reactants:\n if compound.compound.no_dg_explanation:\n name = compound.compound.common_names.all()[0].name\n return '%s %s' % (name,\n compound.compound.no_dg_explanation.lower())\n return None", "def base_verb_string(self): \n if self.is_derived: \n _base_verb_str= getattr(self, '_base_verb_string', None)\n if is_empty_str(_base_verb_str):\n if self.is_phrase:\n # a phrase means the base verb is the actual verb being conjugated.\n self._base_verb_string = self.inf_verb_string\n elif self.reflexive == Reflexive.base_reflexive:\n self._base_verb_string = self.core_characters + self.inf_ending +'se'\n else: \n self._base_verb_string = self.core_characters + self.inf_ending\n return self._base_verb_string\n else:\n return None", "def construct_response(pronoun, noun, verb):\n resp = []\n\n if pronoun:\n resp.append(pronoun)\n\n # We always respond in the present tense, and the pronoun will always either be a passthrough\n # from the user, or 'you' or 'I', in which case we might need to change the tense for some\n # irregular verbs.\n if verb:\n verb_word = verb[0]\n if verb_word in ('be', 'am', 'is', \"'m\"): # This would be an excellent place to use lemmas!\n if 
pronoun.lower() == 'you':\n # The bot will always tell the person they aren't whatever they said they were\n resp.append(\"aren't really\")\n else:\n resp.append(verb_word)\n if noun:\n pronoun = \"an\" if starts_with_vowel(noun) else \"a\"\n resp.append(pronoun + \" \" + noun)\n\n resp.append(random.choice((\"bro\", \"lol\", \"bruh\", \"nigga\", \"ha ha ha xD\", \"zzz.. oh i fell asleep :P\")))\n\n return \" \".join(resp)", "def is_noun(tag):\r\n return tag in ['NN', 'NNS', 'NNP', 'NNPS']", "def _create_formatted_string(self):\n string = NALSyntax.StatementSyntax.Start.value + \\\n self.get_subject_term().get_formatted_string()\n\n string += \" \" + self.get_copula_string() + \" \"\n\n string += self.get_predicate_term().get_formatted_string() + \\\n NALSyntax.StatementSyntax.End.value\n\n return string", "def get_sentence(self):\n words = []\n for i in sorted([ind.index for ind in self.subtree_dict.keys()]):\n if isinstance(self.subtree_dict[i].label, (str, unicode)):\n words.append(self.subtree_dict[i].label)\n return ' '.join(words)", "def morph_noun(word, number, a_an, feature):\n word = morph_number(word, number)\n if not (number in ['first', 'second']) and word[-1] == 's':\n return mark_noun_as_plural(word)\n else:\n return word\n\n if a_an == 'an':\n return mark_noun_as_an(word)\n else:\n return word", "def _get_audio_feature_name(self, adjective):\n return self.SONG_ADJECTIVES.get(adjective, {}).get(\"name\")", "def __get_relevant_words(sentence):\n nouns = None\n try:\n if sentence:\n tokens = nltk.word_tokenize(sentence)\n pos = nltk.pos_tag(tokens)\n nouns = [x[0] for x in pos if x[1].startswith('N') or x[1].startswith('F')]\n except Exception as e:\n nouns = None\n return ' '.join(nouns) if nouns else None", "def get_extras(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Tag parts of speech\n tagged = nltk.pos_tag(text)\n # Try for composed NNP / NNPS\n is_proper_noun = False\n text = []\n proper_noun = \"\"\n for (word, tag) in tagged:\n if not is_proper_noun and (tag == 'NNP' or tag == 'NNPS'):\n # Start building a proper noun\n proper_noun = word\n # Set it true\n is_proper_noun = True\n # Add it to annotations anyway\n text.append(word)\n elif tag == 'NNP' or tag == 'NNPS':\n # Previous was proper noun. So it may be combined\n proper_noun += \" \" + word\n # Add the single word to annotations anyway\n text.append(word)\n elif is_proper_noun and tag == 'IN':\n # Add what we have by now to the text\n text.append(proper_noun)\n # Previous was proper noun. 
So it may be composed\n proper_noun += \" \" + word\n elif is_proper_noun:\n # Add what we have by now to the text\n text.append(proper_noun)\n # Finished with proper noun, so set it false\n is_proper_noun = False\n # Remove duplicates\n seen = {}\n result = []\n for w in text:\n if w in seen:\n continue\n seen[w] = 1\n result.append(w)\n # Eliminate common\n result = [w for w in result if w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english')]\n return result", "def _get_full_entity(entity: spacy.tokens.Token) -> str:\n entity_string = SpacyEventExtractor._get_chunk(entity)\n\n word = entity\n while True:\n prep, word = SpacyEventExtractor._get_prep_with_word(word)\n if word is None:\n break\n entity_string += \" \" + prep\n return entity_string", "def tag_to_wordnet(tag):\n if (tag == 'ADJ'): return('a')\n elif (tag == 'ADV'): return('r')\n elif (tag == 'NOUN'): return('n')\n elif (tag == 'VERB'): return('v')\n else: return None", "def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")", "def get_descriptive_name(self):\r\n long_name=str(self.year)+' '+self.make+' '+self.model\r\n return long_name.title()", "def get_description(self):\n text = \"is a student's t distribution; characterised by its degrees of freedom, which here is\"+str(self.dofs)+\".\"\n return text", "def find_pronoun(sent):\n pronoun = None\n\n for word, part_of_speech in sent.pos_tags:\n # Disambiguate pronouns\n if part_of_speech == 'PRP' and word.lower() == 'you':\n # pronoun = 'I' +++++++++ORIGINAL++++++++++++\n pronoun = 'I'\n elif part_of_speech == 'PRP' and word == 'I':\n # If the user mentioned themselves, then they will definitely be the pronoun\n # pronoun = 'You' +++++++++ORIGINAL++++++++++++\n pronoun = 'You'\n return pronoun", "def get_descriptive_name(self):\n long_name = f\"{self.make} {self.model} {self.year}\"\n \n return long_name.title()", "def description(self):\n retstr = \"GNP with \" + str(self.n) + \" nodes and p = \" + str(self.p)\n return retstr", "def conjugate_present_ere_verb(verb, pronoun, tense):\n avere = {\"io\": \"ho\", \"tu\": \"hai\", \"lui\": \"ha\", \"lei\": \"ha\", \"noi\": \"abbiamo\", \"voi\": \"avete\", \"loro\": \"hanno\"}\n\n irregulars = [\"avere\"]\n if verb in irregulars:\n if verb == \"avere\":\n return avere[pronoun]\n\n ere_endings = {\"io\": \"o\", \"tu\": \"i\", \"lui\": \"e\", \"lei\": \"e\", \"noi\": \"iamo\", \"voi\": \"ete\", \"loro\": \"ono\"}\n compiere_endings = {\"io\": \"o\", \"tu\": \"\", \"lei\": \"e\", \"lui\": \"e\", \"noi\": \"amo\", \"voi\": \"ete\", \"loro\": \"ono\"}\n\n if verb == \"compiere\":\n if tense == \"presente\":\n stripped_verb = strip_off_ending(verb, tense)\n new_verb = stripped_verb + compiere_endings[pronoun]\n return new_verb\n else:\n stripped_verb = strip_off_ending(verb, tense)\n new_verb = stripped_verb + ere_endings[pronoun]\n return new_verb\n else:\n stripped_verb = strip_off_ending(verb, tense)\n new_verb = stripped_verb + ere_endings[pronoun]\n return new_verb", "def get_nouns(lemmas_tags):\r\n nouns = []\r\n for lemma in lemmas_tags:\r\n \"\"\"si la etiqueta es None porque no tiene lemma o es un sustantivo\"\"\"\r\n if lemma[1] == None or lemma[1][0] == 'n':\r\n \"\"\"se agrega solamente el lemma\"\"\"\r\n nouns.append(lemma[0])\r\n return nouns", "def get_article_as_string(article,\n preprocess_type=PreprocessWordType.LEMMATIZE):\n article_string = ''\n for word 
in article.words:\n preprocessed_word = query_utils.preprocess_word(word, preprocess_type)\n if article_string == '':\n article_string = preprocessed_word\n else:\n article_string += (' ' + preprocessed_word)\n return article_string", "def get_string(self, n):\n pad = self.get_pad(n)\n string = pad + self.word\n string += \"\\n\" + self.children[0].get_string(n + 1)\n string += \"\\n\" + self.children[1].get_string(n + 1)\n return string", "def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st", "def get_descriptive_name(self):\r\n long_name = str(self.year)+' '+self.make + ' '+self.model\r\n return long_name.title()", "def get_descriptive_name(self): # 定义描述完整信息的方法\n long_name = str(self.year) + \" \" + self.make + \" \" + self.model # 拼接变量字符串并赋值变量\n return long_name.title() # 返回字符串并首字母大写", "def verb_lemma(word):\n if word.endswith(\"ed\"):\n if word[:-2].endswith(\"v\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"at\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"it\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"et\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ut\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ac\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"i\"):\n return word[:-3].lower() + \"y\"\n elif word[:-2].endswith(\"ir\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ag\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nc\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nu\"):\n return word[:-2].lower() + \"e\"\n else:\n return word[:-2].lower() \n elif word.endswith(\"ing\"):\n if word[:-3].endswith(\"v\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"at\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"it\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"et\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ut\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ac\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"i\"):\n return word[:-4].lower() + \"y\"\n elif word[:-3].endswith(\"ir\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ag\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nc\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nu\"):\n return word[:-3].lower() + \"e\"\n else:\n return word[:-3].lower()\n elif re.match(r\"(does|did|done)\", word):\n return (\"do\")\n elif re.match(r\"(is|are|am|was|will|were|been)\", word):\n return (\"be\")\n elif word == (\"'s\"):\n return (\"be\")\n elif re.match(r\"(had|has|'ve)\", word):\n return (\"have\")\n else:\n return word.lower()", "def lemma(self) -> str:", "def __str__(self):\n return \"{}\".format(self.word)", "def createSentence(self, n=0, v=0, o=0, p=0,prep=True):\n sentence = ''\n if not n:\n n = np.random.randint(1, 5)\n if not v:\n v = np.random.randint(1, 5)\n if not o:\n o = np.random.randint(1, 5)\n sentence += self.createPhrase(nwords=n) + ' '\n if sentence[:-1] not in ('mi', 'sina'):\n sentence += 'li '\n sentence += self.createPhrase(nwords=v) + ' e '\n sentence += self.createPhrase(nwords=o)\n if prep:\n if not p:\n p = np.random.randint(1, 5)\n sentence += ' ' + np.random.choice(self.prepositions) + ' ' + self.createPhrase(nwords=p)\n return sentence", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n 
return long_name.title()", "def descString(self):\n return \"\".join ([self.Name, \" (AR \", str(self.AR), \", Max DEX \"\\\n , str(self.MaxDEXMod), \") - \", str(self.Value), \" gp\"])", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "def provn_representation(self):\n return \"'%s'\" % self._str", "def get_human_readable(self):\n\n def yesno(key):\n if getattr(self, key) and getattr(self, key) > 0:\n return \"Y\"\n else:\n return \"N\"\n\n keys = (\n \"pvs1\",\n \"ps1\",\n \"ps2\",\n \"ps3\",\n \"ps4\",\n \"pm1\",\n \"pm2\",\n \"pm3\",\n \"pm4\",\n \"pm5\",\n \"pm6\",\n \"pp1\",\n \"pp2\",\n \"pp3\",\n \"pp4\",\n \"pp5\",\n \"ba1\",\n \"bs1\",\n \"bs2\",\n \"bs3\",\n \"bs4\",\n \"bp1\",\n \"bp2\",\n \"bp3\",\n \"bp4\",\n \"bp5\",\n \"bp6\",\n \"bp7\",\n )\n result = \", \".join([\"%s: %s\" % (key.upper(), yesno(key)) for key in keys])\n result += \", ACMG classification: %s\" % self.class_auto\n if self.class_override:\n result += \", ACMG class. 
override: %s\" % self.class_override\n return result", "def __str__(self):\n # TODO also show relative abundance\n s = \"{} ion species\\n\".format(len(self.ions))\n for ion in self.ions:\n s += \" {:2s} (Z = {:3d}) {:.3e} particles\\n\".format(ion.getName(), ion.getCharge(), ion.getParticleNumber())\n \n return s", "def summary_string(self) -> str:\n return f\"dixonoid: {self.plain_rules}\"", "def sentence(self) -> str:\n id_word = {nodeattr['position']-1: nodeattr['form']\n for nodeid, nodeattr in self.syntax_nodes.items()}\n return ' '.join([id_word[i] for i in range(max(list(id_word.keys()))+1)])", "def get_descriptive_name(self):\n long_name = f\"{self.year} {self.make} {self.model}\"\n return long_name.title()", "def get_nouns(self):\n word_punct_token = WordPunctTokenizer().tokenize(self.sentence_string)\n\n clean_tokens = []\n for token in word_punct_token:\n token = token.lower()\n\n # remove any value that are not alphabetical\n new_token = re.sub(r\"[^a-zA-Z]+\", \"\", token)\n\n # remove empty value and single character value\n if new_token != \"\" and len(new_token) >= 2:\n vowels = len([v for v in new_token if v in \"aeiou\"])\n if vowels != 0: # remove line that only contains consonants\n clean_tokens.append(new_token)\n\n noun_types = [\"NN\", \"NNS\", \"NNP\", \"NNPS\", \"N\"]\n is_noun = lambda pos: pos in noun_types\n nouns = [word for (word, pos) in nltk.pos_tag(clean_tokens) if is_noun(pos)]\n\n if nouns:\n return nouns\n else:\n raise InvalidSentenceError(self.sentence_string)", "def print_sentence_voice(self, final_subj, final_obj, verb, v_aux, v_tense, subj_tag, subj_word, final_mod2=None, final_root=None):\n new_verb = ''\n s_sentence1 = s_sentence2 = ''\n\n new_verb = gl.verb_conjugate(verb, v_aux, v_tense) + \" \"\n\n if new_verb.strip() == \"\":\n new_verb = gl.verb_conjugate(verb, v_aux, \"VMIS3S0\") + \" \"\n\n\n for k in sorted(final_subj.keys()):\n s_sentence1 += final_subj[k] + \" \"\n\n for k in sorted(final_obj.keys()):\n s_sentence2 += final_obj[k] + \" \"\n\n if final_mod2 != None:\n for k in sorted(final_mod2.keys()):\n s_sentence2 += final_mod2[k] + \" \"\n if final_root != None:\n for k in sorted(final_root.keys()):\n s_sentence2 += final_root[k] + \" \"\n\n\n #removing errors in punctuation\n s_sentence1 = s_sentence1.replace(\", .\", \".\").replace(\"; .\", \".\").replace(\": .\", \".\").replace(\", ?\", \"?\").replace(\"; ?\", \"?\").replace(\": ?\", \"?\").replace(\", !\", \"!\").replace(\"; !\", \"!\").replace(\": !\", \"!\").replace(\". .\", \".\")\n s_sentence2 = s_sentence2.replace(\", .\", \".\").replace(\"; .\", \".\").replace(\": .\", \".\").replace(\", ?\", \"?\").replace(\"; ?\", \"?\").replace(\": ?\", \"?\").replace(\", !\", \"!\").replace(\"; !\", \"!\").replace(\": !\", \"!\").replace(\". 
.\", \".\")\n\n\n return self.runTrueCaser(s_sentence1 + new_verb + s_sentence2)", "def getSynopsis(self):\r\n\r\n if self.parent is None:\r\n command = self.__class__.__name__\r\n offset = command.find('Options')\r\n default = \"Usage: {0}{1}\".format(command[:offset],\r\n (self.longOpt and \" [options]\") or '')\r\n else:\r\n default = (self.longOpt and \" [options]\") or ''\r\n\r\n synopsis = getattr(self, \"synopsis\", default).rstrip()\r\n\r\n if self.parent is not None:\r\n synopsis = ' '.join((self.parent.getSynopsis(),\r\n self.parent.subCommand, synopsis))\r\n\r\n return synopsis", "def getDisambiguatedByNextNoun(self, word):\n\t\treturn disambig_const.DISAMBIGUATATION_TABLE.get(word, {}).get('noun', {}).get('vocalized', word);", "def get_root_verb(text):\n doc = nlp(text)\n for token in doc:\n if token.dep_ == \"ROOT\" and token.head.pos_ == \"VERB\":\n return str(token)\n else:\n return \"\"", "def parse(text):\n parts = text.split(' ')\n noun = Noun(parts[0], int(parts[1]))\n\n parts = parts[2:]\n while len(parts) > 0:\n noun.add_adjectives(Word(parts[0], int(parts[1])))\n parts = parts[2:]\n return noun", "def name(self):\n return self.tr('NATCAPES')", "def convert(tag):\r\n if is_noun(tag):\r\n return wn.NOUN\r\n if is_adjective(tag):\r\n return wn.ADJ", "def __str__(self):\n segments = []\n if self.comment:\n segments.append('\"%s\"' % self.comment)\n if self.sg:\n if self.dbow_words:\n segments.append('dbow+w') # also training words\n else:\n segments.append('dbow') # PV-DBOW (skip-gram-style)\n\n else: # PV-DM...\n if self.dm_concat:\n segments.append('dm/c') # ...with concatenative context layer\n else:\n if self.cbow_mean:\n segments.append('dm/m')\n else:\n segments.append('dm/s')\n segments.append('d%d' % self.docvecs.vector_size) # dimensions\n if self.negative:\n segments.append('n%d' % self.negative) # negative samples\n if self.hs:\n segments.append('hs')\n if not self.sg or (self.sg and self.dbow_words):\n segments.append('w%d' % self.window) # window size, when relevant\n if self.vocabulary.min_count > 1:\n segments.append('mc%d' % self.vocabulary.min_count)\n if self.vocabulary.sample > 0:\n segments.append('s%g' % self.vocabulary.sample)\n if self.workers > 1:\n segments.append('t%d' % self.workers)\n return '%s(%s)' % (self.__class__.__name__, ','.join(segments))", "def _get_sentence(ne_tagged_line):\n return \" \".join([word for word, tag in ne_tagged_line])", "def lemma(self):\n if self.metadata.get('COMLEX'):\n return self.metadata['COMLEX'][0].features['ORTH'][0][1:-1]\n elif self.label in ['NNP', 'NNPS']:\n return self.text\n else:\n return self.text.lower()", "def description(self, depth=1, indent=0, itemize=\"\", output=None, **kwargs):\n if depth is None or depth < 0:\n return \"\"\n output_strio = StringIO()\n if self.label is None:\n label = \"<Unnamed Taxon>\"\n else:\n label = \"'%s'\" % self.label\n output_strio.write('%s%s Taxon object at %s (%s): %s' % (indent*' ', itemize, hex(id(self)), self.oid, label))\n s = output_strio.getvalue()\n if output is not None:\n output.write(s)\n return s", "def __repr__(self):\n s= 'text model name: ' + self.name + '\\n'\n s+= 'number of words: ' + str(len(self.words)) + '\\n'\n s+='number of word lengths: ' + str(len(self.word_lengths))+'\\n'\n s+='number of word stems: ' + str(len(self.stems)) + '\\n'\n s+='number of sentence lengths: ' + str(len(self.sentence_lengths)) +'\\n'\n s+='number of word suffixes: '+ str(len(self.endings))\n \n return s", "def full_text(self) -> str:\n return self._full_text", "def 
__repr__(self):\n return '{} <{}:{} \"{}\" ({}; Pop: {}; Disc Pops: {}; Rep Pops: {})>'.\\\n format(self.id, self.author, self.journal, self.title,\n self.phenotype.phenotype, self.population.population,\n self.disc_pops.to_simple_str(),\n self.rep_pops.to_simple_str())", "def get_descriptive_name(self):\n return f\"{self.year} {self.make} {self.model}\".title()", "def get_nouns(root):\n nouns = []\n for child in root.findall(\"./xdrs/taggedtokens/tagtoken/tags\"):\n noun = False\n for grandchildren in child.findall(\"./tag[@type='pos']\"):\n if grandchildren.text == 'NN' or grandchildren.text == 'NNS':\n noun = True\n if noun == True:\n for grandchildren in child.findall(\"./tag[@type='lemma']\"):\n nouns.append(grandchildren.text)\n return nouns", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n #Mostrar_Grande = long_name.upper()\r\n #return long_name.upper()\r\n #return Mostrar_Grande #Funciona Com Return TAMBÉM, mas olhe na linha 39 como seria necessário usar.\r\n print(long_name.upper())", "def __str__(self):\n s = \"\"\n s += self.synset.name + \"\\t\"\n s += \"PosScore: %s\\t\" % self.pos_score\n s += \"NegScore: %s\" % self.neg_score\n return s", "def _get_nouns(self, review):\n review_features = []\n for sent in review:\n doc = self.nlp(sent)\n # noun_phrase = [np.text for np in doc.noun_chunks]\n nouns = [unicode(lemma(str(word).lower())) for word in doc if word.pos == NOUN]\n review_features.append(nouns)\n return review_features", "def to_adverb(self):\n return self", "def asngen(pool):\n pool = AssociationPool.read(pool)\n rules = AssociationRegistry()\n (asns, orphaned) = generate(pool, rules)\n result = []\n result.append('There where {:d} associations found.'.format(len(asns)))\n result.append('There where {:d} orphaned exposures.'.format(len(orphaned)))\n for assocs in asns:\n result.append(assocs.__str__())\n\n return '\\n'.join(result)", "def to_string(self):\n return self.dungeon_string", "def get_short_name(self):\n split = self.name.split(' - ')\n # author, year, and first couple of words of paper title\n return \"{} ({}), {}\".format(split[0], split[1], \" \".join(split[2].split(' ')[:3]))", "def obj_pretty(objective):\n if objective.objective_type == u'/datum/objective/assassinate':\n return 'Asassinate {} the {}.'.format(objective.target_name, objective.target_role)\n else:\n return objective.objective_desc", "def calc_conservation_string(aln):\n\n percids = calc_conservation(aln)\n\n # find identity positions\n identity = \"\"\n for pid in percids:\n if pid == 1:\n identity += \"*\"\n elif pid > .5:\n identity += \".\"\n else:\n identity += \" \"\n\n return identity", "def describe(self) -> str:\n return (\n \"{name} {surname} è nata/o a {birth_municipality} ({birth_province_code}) il {birthdate}.\"\n \" Ora vive a {municipality} ({province_code}) in {address} {house_number}.\"\n ).format(**self._data)", "def get_transcript(self):\n\n rna = \"\"\n for i in range(len(self.__sequentie)):\n if self.__sequentie[i] == \"A\":\n rna += \"U\"\n if self.__sequentie[i] == \"T\":\n rna += \"A\"\n if self.__sequentie[i] == \"C\":\n rna += \"G\"\n if self.__sequentie[i] == \"G\":\n rna += \"C\"\n if self.__sequentie[i] == \"N\":\n rna += \"N\"\n return rna", "def is_noun(tag_string):\n result = True if tag_string in POS.POS_tags.noun_tags else False\n return result", "def __repr__(self):\n ct = shortest_string_in_list(self.cause_texts)\n et = shortest_string_in_list(self.effect_texts)\n ev = 
','.join(self.evidence_texts)\n return '%s -> %s [%s, %s, %s]' % (ct, et, ev,\n repr(self.cause_polarity),\n repr(self.effect_polarity))", "def de_or_het(noun):\n\tnoun = noun.lower()\n\n\t# we have an excel files saved as csv, that contain words that are definately het-words\n\twith open('het_woorden', 'r') as f:\n\t\thet_woorden = f.read().split('\\n')\n\n\thet_woorden = [w.lower() for w in het_woorden]\t\n\n\t\n\t# and we have an excel file that contains dubious words\n\twith open('ambiguous_nouns', 'r') as f:\n\t\tambiguous_nouns = f.read().split('\\n')\n\t\n\tambiguous_nouns = [w.lower() for w in ambiguous_nouns]\t\n\tmessage = \"\"\"\\nYour noun may be either a 'de' or a 'het' word. Chosing one or the\nother may change the meaning of '{0}'. When in doubt, advice a dictionary.\n\t\"\"\"\n\n\t# next we want to see whether our noun is a 'de' or a 'het' noun:\n\tif noun in ambiguous_nouns:\n\t\traise Exception(message.format(noun))\n\telif noun in het_woorden or noun.endswith('je'):\n\t\tarticle = 'het'\n\telse:\n\t\tarticle = 'de'\t\n\t\n\t# last, we want the article and the noun returned as one string.\t\n\treturn article + \" \" + noun" ]
[ "0.6249946", "0.6021164", "0.600624", "0.5979195", "0.59327227", "0.58711636", "0.57597136", "0.5741161", "0.57387596", "0.57079136", "0.56950766", "0.5683717", "0.56733876", "0.5652428", "0.5563646", "0.55163616", "0.5513116", "0.5472722", "0.5459045", "0.5425532", "0.5423237", "0.5419519", "0.541946", "0.5412078", "0.5409855", "0.5401926", "0.5398518", "0.5398444", "0.53919876", "0.5374007", "0.5357182", "0.53556603", "0.53436214", "0.53412294", "0.5340815", "0.53275573", "0.5325488", "0.53232026", "0.5322284", "0.5319297", "0.5308966", "0.53024125", "0.53006464", "0.53005457", "0.52948767", "0.52943075", "0.5293416", "0.5290383", "0.5288274", "0.52802414", "0.52763605", "0.527504", "0.5272397", "0.52679", "0.52650994", "0.5264727", "0.5264727", "0.5264727", "0.5264727", "0.5264727", "0.5264727", "0.5264727", "0.5264727", "0.52604556", "0.5249635", "0.52434605", "0.5237351", "0.5231963", "0.5230937", "0.52306044", "0.5226371", "0.52149886", "0.5214102", "0.5203698", "0.52020395", "0.5201215", "0.5201148", "0.5188947", "0.5186473", "0.51836175", "0.51794434", "0.5169279", "0.5169076", "0.5164772", "0.5161175", "0.515577", "0.5152566", "0.5148206", "0.5139596", "0.51323354", "0.51312345", "0.5130883", "0.5129728", "0.5128561", "0.51131666", "0.5110764", "0.5109203", "0.51040524", "0.5102234", "0.50952137" ]
0.69946307
0
Parse a noun object from a data file containing nouns and their describing adjectives.
def parse(text):
    parts = text.split(' ')
    noun = Noun(parts[0], int(parts[1]))

    parts = parts[2:]
    while len(parts) > 0:
        noun.add_adjectives(Word(parts[0], int(parts[1])))
        parts = parts[2:]
    return noun
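A minimal usage sketch for the `parse` function above. The `Noun` and `Word` classes and the sample input line are assumptions for illustration only; the dataset row does not define them, so treat this as a hedged reconstruction rather than the original API.

# Hypothetical stand-ins for the Noun/Word classes that parse() expects;
# each stores a surface form and an integer count, and Noun collects adjectives.
class Word:
    def __init__(self, text, count):
        self.text = text
        self.count = count

class Noun(Word):
    def __init__(self, text, count):
        super().__init__(text, count)
        self.adjectives = []

    def add_adjectives(self, word):
        self.adjectives.append(word)

# Assumed data-file line format: "<noun> <count> <adjective> <count> ...".
noun = parse("dog 12 loyal 5 noisy 2")   # parse() as defined in the row above
print(noun.text, noun.count, [(a.text, a.count) for a in noun.adjectives])
# -> dog 12 [('loyal', 5), ('noisy', 2)]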
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n word = word_tag[0]\n tag = word_tag[1]\n self.word_tag_dict[word_tag] += 1\n self.tag_dict[tag] += 1\n self.__add_to_word_dict__(word, tag)\n if self.isNumberWord(word):\n self.numbers += 1\n if word[0].isupper() and len(sentence) > 0:\n self.cap_no_start += 1\n sentence.append(word)\n tags.append(tag)\n if tag == ENDOFSENTENCE:\n self.sentences.append(tuple(sentence))\n self.tags.append(tuple(tags))\n sentence = []\n tags = []", "def process_data_from_input_file(triplet):\n\n sentence = triplet.subject + ' ' + triplet.predicate + ' ' + triplet.object\n doc = nlp(unicode(sentence))\n root = doc[0]\n for t in doc:\n if t.pos_ == 'VERB' and t.head == t:\n root = t\n # elif t.pos_ == 'NOUN'\n\n # also, if only one sentence\n # root = doc[:].root\n\n\n \"\"\"\n CURRENT ASSUMPTIONS:\n - People's names are unique (i.e. there only exists one person with a certain name).\n - Pet's names are unique\n - The only pets are dogs and cats\n - Only one person can own a specific pet\n - A person can own only one pet\n \"\"\"\n\n\n # Process (PERSON, likes, PERSON) relations\n if root.lemma_ == 'like':\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and triplet.object in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and \"n't\" not in triplet.predicate:\n s = add_person(triplet.subject)\n o = add_person(triplet.object)\n s.likes.append(o)\n\n if root.lemma_ == 'be' and triplet.object.startswith('friends with'):\n fw_doc = nlp(unicode(triplet.object))\n with_token = [t for t in fw_doc if t.text == 'with'][0]\n # get text after with\n after_with = fw_doc.text.split(with_token.text+ ' ')[1]\n people = []\n for p in after_with.split(' '):\n if nlp(p)[0].tag_ == 'NNP':\n people.append(nlp(p)[0].text)\n # fw_who = [t for t in with_token.children if t.dep_ == 'pobj'][0].text\n # fw_who = [e for e in fw_doc.ents if e.label_ == 'PERSON'][0].text\n for p in people:\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(triplet.subject)\n o = add_person(p)\n s.likes.append(o)\n o.likes.append(s)\n if root.lemma_ == 'be' and triplet.object == 'friends':\n fw_doc = nlp(unicode(triplet.subject))\n and_token = [t for t in fw_doc if t.text == 'and']\n if and_token:\n and_token = and_token[0].text\n if and_token == 'and' and fw_doc[0].text in [e.text for e in doc.ents if e.label_ == 'PERSON'] and fw_doc[2].text in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(fw_doc[0].text)\n o = add_person(fw_doc[2].text)\n s.likes.append(o)\n o.likes.append(s)\n\n # Process (PET, has, NAME) Mary's dog's name is Rover\n if triplet.subject.endswith('name') and ('dog' in triplet.subject or 'cat' in triplet.subject):\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n\n # handle single names, but what about compound names? 
Noun chunks might help.\n if (len(obj_span) == 1 or len(obj_span) == 2) and obj_span[-1].pos_ == 'PROPN':\n name = triplet.object\n subj_start = sentence.find(triplet.subject)\n subj_doc = doc.char_span(subj_start, subj_start + len(triplet.subject))\n\n s_people = [token.text for token in subj_doc if token.ent_type_ == 'PERSON']\n assert len(s_people) == 1\n s_person = select_person(s_people[0])\n\n pet = get_persons_pet(s_person.name)\n\n pet.name = name\n s_person.has.append(pet)\n\n # Process (Who has dog)\n if root.lemma_ == 'have'and ('dog' in triplet.object or 'cat' in triplet.object):\n # find pets name and instantiate name empty str\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n name = ''\n\n if obj_span[-1].pos_ == 'PROPN':\n name = obj_span[-1].text\n s = add_person(triplet.subject)\n s_pet_type = 'dog' if 'dog' in triplet.object else 'cat'\n pet = add_pet(s_pet_type, name)\n s.has.append(pet)\n\n date = [e.text for e in doc.ents if e.label_ == 'DATE']\n gpe = [e.text for e in doc.ents if e.label_ == 'GPE']\n person = [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG']\n # if person and GPE exists, we add it into trip(departs_on, departs_to)\n if person and (gpe or date):\n s = add_person(triplet.subject)\n o = add_trip(date, gpe)\n s.travels.append(o)", "def loadDataFile(self, filename):\n self.datafile = vocloadlib.readTabFile(filename,\n [ 'term', 'accID', 'status', 'abbreviation',\n 'note', 'comment', 'synonyms', 'synonymTypes',\n 'otherIDs', 'start', 'end', 'parent' \n ])", "def noun_phrase_chunking(part_of_speech_data):\n\n grammar = r\"\"\"\n NP: {<DT|JJ|NN.*>+}\n PP: {<IN><NP>}\n VP: {<VB.*><NP|PP|CLAUSE>+$}\n CLAUSE: {<NP><VP>}\n \"\"\"\n\n grammar2 = r\"\"\"\n NP: {<DT|NN>+} # Chunk sequences of NN and DT\n {<DT><JJ><NN>} # Chunk det+adj+noun\n \"\"\"\n\n return RegexpParser(grammar).parse(part_of_speech_data).draw()", "def parse(self, word):\n word = self.son.segs(word)\n son_map = self._sonority_map(word)\n son_map = self._mark_offglides(son_map)\n son_map = self._adjust_anom_fric_cod(son_map)\n son_map = self._adjust_anom_fric_ons(son_map)\n ons_son = self._initial_onset(son_map)\n cod_son = self._final_coda(son_map)\n ons = self.from_map(ons_son, word)\n cod = self.from_reverse_map(cod_son, word)\n return (ons, cod)", "def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())", "def parse_data(filename):\r\n labels = []\r\n documents = []\r\n with open(filename, 'r') as f:\r\n for line in f:\r\n values = line.split()\r\n label = values[0]\r\n document = []\r\n for wordCount in values[1:]:\r\n parsed = wordCount.split(':')\r\n word = parsed[0]\r\n count = int(parsed[1])\r\n document.append((word, count))\r\n labels.append(label)\r\n documents.append(document)\r\n return (labels, documents)", "def parse(self, infile):\r\n raise NotImplementedError()", "def loadDataFile(self, filename):\n \n self.datafile = vocloadlib.readTabFile(filename,\n [ 'term', 'accID', 'status', 'abbreviation',\n 'note', 'comment', 'synonyms', 'synonymTypes',\n 'otherIDs', 'emapa', 'ts', 'parent']\n )", "def read_tagged_word_list(filename):\n # TODO: write and test this method\n print 'reading tagged file'", "def parse_label(self):\n # TODO: make this work with attached labels as well as\n # stand alone labels.\n # Save the RAW full text of the label to self._raw\n input_stream = FileStream(self.infile)\n lexer = 
ODLv21Lexer(input_stream)\n tokens = CommonTokenStream(lexer)\n\n parser = ODLv21Parser(tokens)\n parse_tree = parser.label()\n self._parse_tree = parse_tree\n visitor = Pds3LabelVisitor()\n visitor.visit(parse_tree)\n return visitor.root_dict", "def parse(las_file):\n io_stream = io.TextIOWrapper(las_file)\n \n entry_date = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n entry_filename = 'las_file-' + entry_date + '.las'\n\n entry = SectionInfo()\n entry.filename = entry_filename\n section = ''\n\n for line in io_stream.readlines():\n\n line = line.rstrip()\n\n if not line:\n continue\n\n # Lines beginning with '~' denote the next section header.\n if line[0] == '~':\n section = line\n continue\n # Skip comment lines.\n elif line[0] == '#':\n continue\n\n # LAS standard option 'OTHER' section\n if section[1] == 'O': \n entry.value = line\n entry.section = section\n # The rest of the standard metadata sections\n elif section[1] in ['V', 'W', 'C', 'P']:\n entry = parse_formatted_section_line(section, line, entry)\n # the data section and non-standard sections\n else:\n # print(\"Non-Metadata-Section: [{}]: [{}]\".format(section[0:2], line))\n continue\n\n # Write entry to db\n entry.save()\n\n # Initialize next entry\n entry = SectionInfo()\n entry.filename = entry_filename\n\n return entry_filename", "def read_data(filename, prefix=None):\n p_data = {}\n with open(filename) as f:\n # This first line is the header for the entire file.\n line = f.next()\n line = line.strip()\n # prev_line = line\n top_header = line.split(',')\n if not top_header:\n # Don't parse this for now.\n pass\n # Now read in per-participant data.\n while True:\n word_list = []\n all_words_data = {}\n # The first line for the participant is a header.\n try:\n line = f.next()\n except StopIteration:\n # We had previously read everything, so we're done.\n break\n line = line.strip()\n p_header = line.split(',')\n\n # The participant's ID # comes first.\n p_id = p_header[0]\n if not p_id:\n # This happens when the previous participant didn't answer.\n \"\"\"\n print 'previous line:', prev_line\n print 'current line:', line\n print 'p header:', p_header\n print\n \"\"\"\n continue\n if prefix:\n p_id = prefix + p_id\n # print 'SN #', p_id\n # The number of N/A's this p is at 28.\n try:\n p_nas = int(p_header[28])\n except ValueError:\n # This happens when an RA messes up the file.\n \"\"\"\n print 'nas: previous line:', prev_line\n print 'nas: current line:', line\n print 'nas: p header:', p_header\n print\n \"\"\"\n raise\n # print \"NA's: #\", p_nas\n # Check if this participant left everything blank.\n # XXX: Have to hard-code this.\n if p_nas == 20:\n \"\"\"Don't record anything.\n p_data[p_id] = {'words': None,\n 'word_data': None,\n 'nas': None,\n 'overall': None}\n \"\"\"\n continue\n # The next line after the header has both the data\n # for the first word and overall statistics.\n # prev_line = line\n try:\n line = f.next()\n except StopIteration:\n # We had previously read everything, so we're done.\n break\n line = line.strip()\n word, word_data, overall_data = parse_first_line(line.split(','))\n word_list.append(word)\n all_words_data[word] = word_data\n # Now read data for the rest of the words.\n for line in f:\n line = line.strip()\n word, word_data = parse_data_lines(line.split(','))\n if word == '':\n \"\"\"\n print \"loop's previous line:\", prev_line\n print \"loop's current line:\", line\n print\n \"\"\"\n # prev_line = line\n break\n word_list.append(word)\n all_words_data[word] = word_data\n # 
prev_line = line\n # Compute per-word averages\n all_total_avg, future_total_avg, past_total_avg = \\\n datacomputer.compute_all_future_past(all_words_data)\n overall_data['all'] = all_total_avg\n overall_data['future'] = future_total_avg\n overall_data['past'] = past_total_avg\n p_data[p_id] = {'words': word_list,\n 'word_data': all_words_data,\n 'nas': p_nas,\n 'overall': overall_data}\n # print 'p_data'\n # print p_data[p_id]\n # print\n print \"Processed {} participants' data\".format(len(p_data))\n return p_data", "def _parse(self, infile):\n raise NotImplementedError()", "def parse_voc(filename):\n tree = ET.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\n int(bbox.find('ymin').text),\n int(bbox.find('xmax').text),\n int(bbox.find('ymax').text)]\n objects.append(obj_struct)\n\n return objects", "def get_noun_phrases(blob):\n return blob.noun_phrases", "def import_data(in_file):\n\n print '\\n\\tImport data'\n sentence = []\n concept = []\n sentences = []\n concepts = []\n for line in open(in_file, 'r'):\n if line != '\\n':\n sentence += [ line.split()[0] ]\n concept += [ line.split()[1] ]\n else:\n sentences += [ sentence ]\n concepts += [ concept ]\n sentence = [ ]\n concept = [ ]\n pos = []\n lemma = []\n poss = []\n lemmas = []\n for line in open(in_file.replace('.data', '.feats.txt'), 'r'):\n if line != '\\n':\n pos += [ line.split()[ 1 ] ]\n lemma += [ line.split()[ 2 ] ]\n else:\n poss += [ pos ]\n lemmas += [ lemma ]\n pos = [ ]\n lemma = [ ]\n print '\\t--done'\n return sentences, poss, lemmas, concepts", "def read_pronunciation(pronunciation_file):\n # file = open('dictionary.txt', 'r')\n #\n # for line in file:\n # print line\n\n ################# https://m.reddit.com/r/CompSciPortfolio/comments/303fyo/assignment_3_poetry_reader/\n\n pronunciation_dictionary = {}\n line = pronunciation_file.readline()\n while line.startswith(';;;'):\n line = pronunciation_file.readline()\n while line != '':\n stripped_line = line.strip()\n separation = stripped_line.find(' ')\n pronunciation_dictionary[stripped_line[:separation]] = stripped_line[(separation + 2):].split()\n line = pronunciation_file.readline()\n return pronunciation_dictionary\n\n\n\n # my_list = {}\n # for line in pronunciation_file.readlines():\n # line = line.strip()\n # if line and \";;;\" not in line:\n # r = line.split()\n # word = r[0]\n # phonemes = r[1:]\n # my_list[word] = phonemes\n # return my_list", "def main ():\n\n\tfio = fileIo('input.txt')\n text = fio.getInput()\n\n\tp = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n\tout = filter(None, p.split(text))\n\ti = 0\n\tlistOfLists = []\n\t\n\n\tfor s in out:\n\t\ti += 1\n\t\ttext = nltk.word_tokenize(s)\n\t\tpos = nltk.pos_tag(text)\n\t\tpattern = \"NP: {<DT>?<JJ>*<NN>}\"\n\t\tNPChunker = nltk.RegexpParser(pattern)\n\t\tresult = NPChunker.parse(pos)\n\t\tlistOfLists.append( result )\n\n\tprint \"Noun Count:\\n\" + str(countNouns( listOfLists ))\n\tprint \"Verb Count:\\n\" + str(countVerbs( listOfLists ))\n\tprint \"Adjective Count:\\n\" + str(countAdjectives( listOfLists ))", "def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: 
continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels", "def parseFile(self, filename):\n self.__filename = filename\n\n if os.path.isfile(filename) == False:\n self.LogError(\"Unable to open input file \" + str(filename))\n raise IOError\n\n self.__file = open(filename, 'r')\n\n while True:\n string = self.__file.readline()\n if string == \"\":\n break\n\n if string.upper().find(\"[SYSTEM]\") != -1:\n #print string.upper()\n self.__parseSystem()\n\n if string.upper().find(\"[GRASS]\") != -1:\n #print string.upper()\n self.__parseGrass()\n\n if string.upper().find(\"[COMPLEXDATA]\") != -1:\n #print string.upper()\n self.complexDataList.append(ComplexData(self.__file))\n\n if string.upper().find(\"[COMPLEXOUTPUT]\") != -1:\n #print string.upper()\n self.complexOutputList.append(ComplexOutput(self.__file))\n\n if string.upper().find(\"[LITERALDATA]\") != -1:\n #print string.upper()\n LD = LiteralData(self.__file)\n if LD.identifier == 'multi_output':\n self.LogWarning(\"multi_output: \" + LD.value.upper())\n if LD.value.upper() == 'TRUE':\n self.multiOutput = True\n else:\n self.literalDataList.append(LD)", "def parseFileName(filename):\n entry = DataEntry(\"\",0,{},{},0,0)\n wordArray = filename.split(\".\")\n entry.publication_name = wordArray[1]\n entry.year = wordArray[0]\n return entry", "def load(path: str) -> \"DataDescriptor\":\n\n\t\twith open(path, \"r\") as f:\n\t\t\tinfo_dict = json.load(f)\n\n\t\treturn DataDescriptor(\n\t\t\tn_gram_size=int(info_dict[\"n_gram_size\"]),\n\t\t\tcaseless=bool(info_dict[\"caseless\"]),\n\t\t\tignore_punctuation=bool(info_dict[\"ignore_punctuation\"]),\n\t\t\tadd_pos_tags=bool(info_dict[\"add_pos_tags\"]),\n\t\t\tuses_lemma=bool(info_dict[\"uses_lemma\"]),\n\t\t\tuses_sentences=bool(info_dict[\"uses_sentences\"])\n\t\t)", "def parse_object(self, word_list):\n\n self.skip(word_list, 'stop')\n\n next_word = self.peek(word_list)\n\n if next_word == 'noun':\n return self.match(word_list, 'noun')\n\n elif next_word == 'direction':\n return self.match(word_list, 'direction')\n\n else:\n raise ParserError(\n 'Expected a noun or direction. Got a %s.' 
% next_word\n )", "def parse_data(fp):\n pass", "def _parse(self):\n with open(_join(self.man_dir, self.man_fn)) as fp:\n lines = fp.readlines()\n \n desc_indxs = []\n for i, L in enumerate(lines):\n if \"#landuse\" in L or \" # landuse\" in L:\n desc_indxs.append(i-1)\n desc_indxs.append(i-2)\n desc_indxs.append(i-3)\n \n lines = [L[:L.find('#')].strip() for L in lines]\n lines = [L for i, L in enumerate(lines) if len(L) > 0 or i in desc_indxs]\n\n del desc_indxs\n \n self.datver = lines.pop(0)\n self.nofe = int(lines.pop(0))\n self.sim_years = int(lines.pop(0))\n \n # Read Plant Growth Section\n self.plants = PlantLoops(lines, self)\n\n # Read Operation Section\n self.ops = OpLoops(lines, self)\n \n # Read Initial Condition Section\n self.inis = IniLoops(lines, self)\n \n # Read Surface Effects Section\n self.surfs = SurfLoops(lines, self)\n \n # Read Contour Section\n self.contours = ContourLoops(lines, self)\n \n # Read Drainage Section\n self.drains = DrainLoops(lines, self)\n \n # Read Yearly Section\n self.years = YearLoops(lines, self)\n \n # Read Management Section \n self.man = ManagementLoop(lines, self)", "def parse_file(self, infile, chardict, labeldict):\n examples = []\n fin = io.open(infile, 'r')\n # idx is for the index of the row in the \n # original file before shuffling and randomization\n idx = 0\n for line in fin: \n entity, label = map(clean, line.rstrip().split('\\t')[:2])\n # print entity\n ent = map(lambda c:chardict[c], list(entity))\n lab = map(lambda l:labeldict[l] if l in labeldict else 0, label.split(','))\n examples.append((idx, ent, lab))\n idx += 1\n fin.close()\n print \"num_rows:\", len(examples), \" index\", idx\n return examples", "def __init__(self, variable, pnoun, nucleus):\n super(ProperNounExpression, self).__init__(variable, EmptyExpression(), nucleus)\n assert(pnoun in proper_nouns)\n self.pnoun = pnoun", "def read_ann_file(fileid, ann_dir):\n ann_file = \"%s/%s.ann\"%(ann_dir,fileid)\n with codecs.open(ann_file, 'r', 'utf-8') as f:\n data = f.read()\n rows = data.split('\\n')\n entities = {}\n ent_count = 0\n relations = {}\n #annotations = []\n for row in rows:\n cols = row.split(\"\\t\")\n ann_id = cols[0]\n if(u\"#\" in cols[0]):\n tmp = cols[1].split()[1:],\" \",cols[2]\n annotations.append(tmp)\n elif(len(cols)==3 and u\"T\" in cols[0]):\n # is an entity\n ent_count += 1\n ent_type = cols[1].split()[0]\n ranges = cols[1].replace(\"%s\"%ent_type,\"\")\n if \";\" in ranges:\n ranges = [{\"start\":int(r.split()[0]),\"end\":int(r.split()[1])} for r in ranges.split(';')]\n else:\n ranges = [{\"start\":int(ranges.split()[0]),\"end\":int(ranges.split()[1])}]\n entities[cols[0]] = {\"ann_id\":ann_id\n ,\"entity_type\": ent_type\n ,\"positions\": ranges\n ,\"surface\":cols[2]\n ,\"continuation\":False}\n elif(len(cols)>=2 and u\"R\" in cols[0]):\n rel_type, arg1, arg2 = cols[1].split()\n relations[cols[0]] = {\"ann_id\":ann_id\n ,\"arguments\":(arg1.split(\":\")[1], arg2.split(\":\")[1])\n ,\"relation_type\":rel_type}\n else:\n if(len(cols)>1):\n if(cols[1].split()[0]==\"Continuation\"):\n continued_entity_id = cols[1].split()[1]\n #print cols[1].split()[0],continued_entity_id\n entities[continued_entity_id][\"continuation\"] = True\n return entities, relations", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = 
sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def parse_fasta(fasta_f, contig_data):\n\n basen = os.path.basename(fasta_f)\n [soil, ecotype, media] = basen.split(\"_\")[:3]\n\n with open(fasta_f, 'rU') as IN:\n for record in SeqIO.parse(IN, \"fasta\"):\n contig_data[record.description] = {'length': len(record.seq), 'soil': soil, 'ecotype': ecotype, 'media': media}", "def load_morphoit(self, path=None):\n path = path or self.path\n # f = open(path,'r')\n f = codecs.open(path, 'r', 'latin-1')\n lines = f.readlines()\n f.close()\n self.lemma_dict.clear()\n self.tag_dict.clear()\n self.suffix_dict.clear()\n self.word_tag_dict.clear()\n n_entries = 0\n for line in lines:\n line = line.strip()\n if line:\n # entry = line.split()\n entry = line.split(u'\\u0009') # entry has TAB delimited items\n n = len(entry)\n word = entry[0]\n if n > 1:\n lemma = entry[1]\n else:\n lemma = 'lemma?'\n if n > 2:\n tags = entry[2]\n if tags:\n splitted_tags = tags.split(u':')\n tag = splitted_tags[0]\n if tag == u'SMI': # smile ?\n continue\n else:\n tag = 'tag?'\n n_entries += 1\n if self.use_defaultdict:\n self.lemma_dict[lemma].append(word)\n self.tag_dict[tag] += 1\n self.word_tag_dict[word][tag] += 1\n else:\n words = self.lemma_dict.get(lemma, []); words.append(word); self.lemma_dict[lemma] = words\n self.tag_dict[tag] = self.tag_dict.get(tag, 0) + 1\n dict = self.word_tag_dict.get(word, {})\n \"\"\" only occurrence in corpus will increment the counter !\n dict[tag] = dict.get(tag, 0) + 1\n \"\"\"\n if dict.get(tag, None) is None:\n dict[tag] = 0\n self.word_tag_dict[word] = dict\n length = len(word)\n reversed = util.reverse(word)\n max = min(length, MAX_SUFFIX)\n for i in range(MIN_SUFFIX, max):\n suffix = reversed[:i]\n if self.use_defaultdict:\n self.suffix_dict[suffix][tag] += 1\n else: # defaultdicts cannot be pickled !\n dict = self.suffix_dict.get(suffix, {})\n dict[tag] = dict.get(tag, 0) + 1\n self.suffix_dict[suffix] = dict\n return len(lines), n_entries", "def get_nouns(txt):\n query = 'https://api.textgain.com/1/tag?q='\n query += urllib.parse.quote(txt, safe='')\n query += '&lang=fr&key=***'\n resp = requests.get(query)\n\n body = json.loads(resp.text)['text'][0]\n\n nouns = {}\n for iterable_elem in body:\n for elem in iterable_elem:\n if elem['tag'] == 'NOUN':\n word = elem['word']\n if word in nouns.keys():\n nouns[word] += 1\n else:\n nouns[word] = 1\n print(nouns)\n return nouns", "def read_file(f, prefix=None):\n p_data = {}\n # This first line is the header for the entire file.\n line = f.next()\n line = line.strip()\n # prev_line = line\n top_header = line.split(',')\n if not top_header:\n # Don't parse this for now.\n pass\n # Now read in per-participant data.\n while True:\n word_list = []\n all_words_data = {}\n # The first line for the participant is a header.\n try:\n line = f.next()\n except StopIteration:\n # We had previously read everything, so we're done.\n break\n line = line.strip()\n p_header = line.split(',')\n\n # The participant's ID # comes first.\n p_id = p_header[0]\n if not p_id:\n # This happens when the previous participant didn't answer.\n \"\"\"\n print 'previous line:', prev_line\n print 'current line:', line\n print 'p header:', p_header\n print\n \"\"\"\n continue\n if 
prefix:\n p_id = prefix + p_id\n # print 'SN #', p_id\n # The number of N/A's this p is at 28.\n try:\n p_nas = int(p_header[28])\n except ValueError:\n # This happens when an RA messes up the file.\n \"\"\"\n print 'nas: previous line:', prev_line\n print 'nas: current line:', line\n print 'nas: p header:', p_header\n print\n \"\"\"\n raise\n # print \"NA's: #\", p_nas\n # Check if this participant left everything blank.\n # XXX: Have to hard-code this.\n if p_nas == 20:\n p_data[p_id] = {'words': None,\n 'word_data': None,\n 'nas': None,\n 'overall': None}\n continue\n # The next line after the header has both the data\n # for the first word and overall statistics.\n # prev_line = line\n try:\n line = f.next()\n except StopIteration:\n # We had previously read everything, so we're done.\n break\n line = line.strip()\n word, word_data, overall_data = parse_first_line(line.split(','))\n word_list.append(word)\n all_words_data[word] = word_data\n # Now read data for the rest of the words.\n for line in f:\n line = line.strip()\n word, word_data = parse_data_lines(line.split(','))\n if word == '':\n \"\"\"\n print \"loop's previous line:\", prev_line\n print \"loop's current line:\", line\n print\n \"\"\"\n # prev_line = line\n break\n word_list.append(word)\n all_words_data[word] = word_data\n # prev_line = line\n # Compute per-word averages\n all_total_avg, future_total_avg, past_total_avg = \\\n datacomputer.compute_all_future_past(all_words_data)\n overall_data['all'] = all_total_avg\n overall_data['future'] = future_total_avg\n overall_data['past'] = past_total_avg\n p_data[p_id] = {'words': word_list,\n 'word_data': all_words_data,\n 'nas': p_nas,\n 'overall': overall_data}\n # print 'p_data'\n # print p_data[p_id]\n # print\n print \"Processed {} participants' data\".format(len(p_data))\n return p_data", "def load_conll_notags(unfile, max_slen, vocab=[], oovs={}, pads={}, lower=False, mwe=True, unk_case=True):\n # special characters used for splitting words\n split_chars = set([',', '.', ':', '-', '~', \"'\", '\"'])\n\n # punctuation that denotes when a sentence finishes\n sent_split_words = set(['.', '?', '!', ';', '—'])\n\n input_sents = []\n input_words = []\n windex = -1\n\n # number of words from which to split sentences\n LIMIT_SENT_LEN = max_slen\n\n sents = []\n if 'begin' in pads:\n next_words = [pads['begin']]\n next_syms = ['']\n next_indexs = [windex]\n sent_base_length = 1\n else:\n next_words = []\n next_syms = []\n next_indexs = []\n sent_base_length = 0\n\n # select files to use\n input_files = [unfile]\n\n # counters\n num_raw_sents = 0\n num_sents = 0\n num_words = 0\n num_oovs = 0\n\n # iterate over lines in the input files\n for ifile in input_files:\n for line in codecs.open(ifile, mode = 'r', errors = 'ignore', encoding = 'utf-8'):\n # discard newline character\n line = line[:-1]\n\n # keep adding words while in the middle of a sentence\n if line:\n word = line.split('\\t')[0]\n sym = word\n # add new original word\n windex += 1\n input_words.append(word)\n num_words += 1\n # lowercase when indicated\n if lower:\n word = word.lower()\n # use an heuristic and try to map oov words\n if vocab and word not in vocab:\n if word not in split_chars:\n if re.match('^[0-9\\.\\,-]+$', word):\n word = oovs['number']\n elif _match_word_vocab(word, vocab) != word:\n word = _match_word_vocab(word, vocab)\n elif ' ' in word or '~' in word or '-' in word and mwe:\n # attempt to split multi-word expressions\n constituents_text = re.split('[\\s~ | \\s-]+', word)\n constituents = 
[_match_word_vocab(w, vocab) for w in constituents_text]\n if all([True if c in vocab else False for c in constituents]):\n next_words += constituents[:-1]\n next_syms += constituents[:-1]\n next_indexs += [windex] * len(constituents[:-1])\n word = constituents[-1]\n sym = constituents[-1]\n else:\n if unk_case and word[0].isupper():\n word = oovs['UNKNOWN']\n else:\n word = oovs['unknown']\n num_oovs += 1\n else:\n if unk_case and word[0].isupper():\n word = oovs['UNKNOWN']\n else:\n word = oovs['unknown']\n num_oovs += 1\n else:\n word = oovs['unknown']\n num_oovs += 1\n\n next_words.append(word)\n next_syms.append(sym)\n next_indexs.append(windex)\n\n # stack the current sentence upon seeing an empty line or a sentence end mark\n if not line or (len(next_words) > 3 and next_words[-4] in sent_split_words) or (len(next_words) >= LIMIT_SENT_LEN and len(sent_split_words.intersection(next_words)) < 1):\n if len(next_words) > sent_base_length:\n # split when an empty line marks a sentence end\n if not line:\n if 'end' in pads:\n next_words.append(pads['end'])\n next_syms.append('')\n next_indexs.append(-1)\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n input_sents.append(input_words)\n input_words = []\n windex = -1\n next_words = []\n next_syms = []\n next_indexs = []\n num_raw_sents += 1\n num_sents += 1\n # split when punctuation marks a sentence end\n elif len(next_words) > 3 and next_words[-4] in sent_split_words:\n split_words = next_words[:-3]\n split_syms = next_syms[:-3]\n split_indexs = next_indexs[:-3]\n if 'end' in pads:\n split_words.append(pads['end'])\n split_syms.append('')\n split_indexs.append(-1)\n sents.append(list(zip(split_words, split_indexs, split_syms)))\n next_words = next_words[-3:]\n next_syms = next_syms[-3:]\n next_indexs = next_indexs[-3:]\n num_sents += 1\n # split when the maximum sentence length is reached\n # a bad guess is better than not guessing when predicting tags\n else:\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n next_words = []\n next_syms = []\n next_indexs = []\n num_sents += 1\n\n if 'begin' in pads:\n next_words = [pads['begin']] + next_words\n next_syms = [''] + next_syms\n next_indexs = [-1] + next_indexs\n\n else:\n input_sents.append(input_words)\n input_words = []\n windex = -1\n num_raw_sents += 1\n\n # double check the last sentence\n if len(next_words) > sent_base_length:\n if 'end' in pads:\n next_words.append(pads['end'])\n next_syms.append('')\n next_indexs.append(-1)\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n input_sents.append(input_words)\n input_words = []\n windex = -1\n num_raw_sents += 1\n num_sents += 1\n\n # find the allowed sentence length\n print('[INFO] Number of unlabelled OOV words: ' + str(num_oovs) + ' / ' + str(num_words))\n print('[INFO] Original number of unlabelled sentences: ' + str(num_raw_sents))\n print('[INFO] Number of extracted unlabelled sentences ' + str(num_sents))\n return input_sents, sents", "def load_annos(self):\n data = None\n with open(self.anno_path, 'r') as file:\n if self.ext == '.json':\n data = json.load(file)\n\n # Label start at index 0\n if data is not None:\n for anno in data['annotations']:\n anno['category_id'] -= 1\n\n for anno in data['categories']:\n anno['id'] -= 1\n\n return data", "def _load_dat(self):\n modelfile = self.filename\n with open(modelfile) as f:\n content = f.readlines()\n\n self.comment = content.pop(0) # Comment line\n content = [x for x in content if not x.startswith('#')]\n\n for line in content:\n if('atoms' 
in line): self.natoms = int(line.split()[0])\n if('xlo' in line and 'xhi' in line):\n self.xsize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('ylo' in line and 'yhi' in line):\n self.ysize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('zlo' in line and 'zhi' in line):\n self.zsize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('atom types' in line): nelems = int(line.split()[0])\n if('Masses' in line): mflag = content.index(line) + 1\n if('Atoms' in line): aflag = content.index(line) + 1\n try:\n mflag\n except NameError:\n raise Exception(\"ERROR! You need to define the masses in the .dat file.\")\n atomtypes = {}\n while(nelems > 0):\n if(len(content[mflag].split()) == 2):\n atomtypes[int(content[mflag].split()[0])] = masses.get_znum(float(content[mflag].split()[1]))\n nelems -= 1\n mflag += 1\n self.atoms = []\n natoms = self.natoms\n while(natoms > 0):\n sline = content[aflag].split()\n if(len(sline) >= 5):\n # We found an atom\n id = int(sline[0])\n type = int(sline[1])\n x = float(sline[2])\n y = float(sline[3])\n z = float(sline[4])\n znum = atomtypes[type]\n # Add it to the model\n self.atoms.append(Atom(id,znum,x,y,z))\n natoms -= 1\n aflag += 1", "def load_ud_english(fpath):\n import os\n import re\n from collections import defaultdict\n n = 1\n\n fname = os.path.split(fpath)[1]\n\n parses = defaultdict(list)\n sent_ids = []\n newdoc_ids = []\n \n for l in open(fpath):\n ident = fname+' '+str(n)\n \n if re.match(r'\\# newdoc id', l):\n newdoc_ids.append(n)\n #newdoc_ids.append(l.split(\"=\")[-1].strip())\n \n if re.match(r'^\\d', l):\n l_split = l.strip().split()\n parses[ident].append(l_split)\n \n elif parses[ident]:\n sent_ids.append(ident)\n n += 1\n\n return newdoc_ids, len(sent_ids)", "def en_noun(t, label):\r\n s = label\r\n p = []\r\n is_uncountable = False\r\n \r\n # http://en.wiktionary.org/wiki/Template:en-noun\r\n head = t.arg(\"head\", label)\r\n p1 = t.arg(0)\r\n p2 = t.arg(1)\r\n \r\n if p1 == \"-\":\r\n # uncountable\r\n is_uncountable = True\r\n \r\n if p2 == \"s\":\r\n # ends by s\r\n p.append(head + \"s\")\r\n \r\n elif p2 is not None:\r\n # word\r\n p.append(p2)\r\n \r\n elif p1 == \"es\":\r\n # add es\r\n p.append(head + \"es\")\r\n \r\n elif p1 is not None:\r\n # use term\r\n p.append(p1)\r\n \r\n elif p1 is None and p2 is None:\r\n p.append(head+\"s\")\r\n\r\n for k,a in t.args.items():\r\n if not a.is_named():\r\n if k == 0 or k == 1:\r\n continue\r\n \r\n p.append(a.as_string())\r\n \r\n return (s, p, is_uncountable)", "def parse_rosalind(filename):\n print \"parse_rosalind should be called parse_fasta\"\n return parse_fasta(filename)", "def load_data(filename):\n with open(filename, 'r') as f:\n d = json.load(f)\n\n return d['nicknames'], d['relations'], d['directed']", "def parse (cls, raw_data):\n # Parse text\n model = NFFGModel.parse(raw_data)\n # Create new NFFG\n nffg = NFFG(id=model.id, name=model.name, service_id=model.service_id,\n version=model.version, mode=model.mode, metadata=model.metadata)\n # Load Infras\n for infra in model.node_infras:\n nffg.add_node(infra)\n # Load SAPs\n for sap in model.node_saps:\n nffg.add_node(sap)\n # Load NFs\n for nf in model.node_nfs:\n nffg.add_node(nf)\n # Load Links\n for link in model.edge_links:\n if link.src.node.type == NFFG.TYPE_NF or \\\n link.dst.node.type == NFFG.TYPE_NF:\n link.type = str(NFFG.TYPE_LINK_DYNAMIC)\n nffg.add_edge(link.src.node, link.dst.node, link)\n # Load SG next hops\n for hop in model.edge_sg_nexthops:\n 
nffg.add_edge(hop.src.node, hop.dst.node, hop)\n # Load Requirements\n for req in model.edge_reqs:\n nffg.add_edge(req.src.node, req.dst.node, req)\n return nffg", "def _parse_ml(self, line):\n # Parse the line\n fields = line.split('\\\\')\n if self.lang == ENGLISH:\n # pylint: disable=C0301\n # English sample:\n # 14\\abandonment\\94\\C\\\\1\\N\\N\\N\\N\\Y\\abandon+ment\\2x\\SA\\N\\N\\N\\#\\N\\N\\SA\\((abandon)[V],(ment)[N|V.])[N]\\N\\N\\N\n # From the README:\n # The eml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Cob\n # 4. MorphStatus\n # 5. Lang\n # 6. MorphCnt\n # 7. NVAffComp\n # 8. Der\n # 9. Comp\n # 10. DerComp\n # 11. Def\n # 12. Imm\n # 13. ImmSubCat\n # 14. ImmSA\n # 15. ImmAllo\n # 16. ImmSubst\n # 17. ImmOpac\n # 18. TransDer\n # 19. ImmInfix\n # 20. ImmRevers\n # 21 FlatSA\n # 22. StrucLab\n # 23. StrucAllo\n # 24. StrucSubst\n # 25. StrucOpac\n lemma = fields[0]\n word = fields[1]\n derivation = fields[21]\n elif self.lang == DUTCH:\n # pylint: disable=C0301\n # Dutch sample:\n # 19\\aalbessengelei\\7\\C\\1\\Y\\Y\\Y\\aalbes+en+gelei\\NxN\\N\\N\\(((aal)[N],(bes)[N])[N],(en)[N|N.N],(gelei)[N])[N]\\N\\N\\N\n # The dml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Inl\n # 4. MorphStatus\n # 5. MorphCnt\n # 6. DerComp\n # 7. Comp\n # 8. Def\n # 9. Imm\n # 10. ImmSubCat\n # 11. ImmAllo\n # 12. ImmSubst\n # 13. StrucLab\n # 14. StruAcAllo\n # 15. StrucSubst\n # 16. Sepa\n lemma = fields[0]\n word = fields[1]\n derivation = fields[12]\n\n # Skip multi-word entries for roots\n roots = self._get_root(derivation) if \" \" not in word else None\n return (lemma, word, roots)", "def parse_from_file (path):\n with open(path) as f:\n return NFFG.parse(f.read())", "def check_nouns(elem_dictionary: dict, key: str, alet_dict: dict, last_nouns: list,\n last_events: list, turtle: list, ext_sources: bool) -> list:\n nouns = []\n for elem in elem_dictionary[key]: # The subject or object nouns\n elem_key = key[0:-1] # Create dictionary key = 'subject' or 'object'\n elem_type = elem[f'{elem_key}_type']\n elem_text = elem[f'{elem_key}_text']\n # Get rid of titles (such as Ms, Miss, Mr, ...)\n if 'FEMALE' in elem_type:\n elem_text = _remove_title_from_name(female_titles, elem_text)\n elif 'MALE' in elem_type:\n elem_text = _remove_title_from_name(male_titles, elem_text)\n head_lemma, head_text = get_head_word(elem_text)\n # poss_dict = Dictionary of nouns (keys) with their possessive modifiers (values)\n # Revised elem_text = noun text with possessives removed\n poss_dict, elem_text = _separate_possessives(elem_text)\n new_tuple = tuple()\n possible_name = empty_string # For a proper name, may contain shortened form = given + surname (any order)\n if elem_type == 'CARDINAL': # For example, 'one' in 'he has one' or in 'one of the band'\n if 'preps' in elem:\n new_tuple = _account_for_cardinal_noun(elem, elem_text, head_lemma,\n alet_dict, last_nouns, last_events, turtle, ext_sources)\n else:\n iri = re.sub(r'[^:a-zA-Z0-9_]', '_', f':{elem_text}_{str(uuid.uuid4())[:13]}').replace('__', '_')\n new_tuple = (elem_text, 'CARDINAL', [owl_thing2], iri)\n turtle.extend([f'{iri} a owl:Thing .',\n f'{iri} rdfs:label \"{elem_text}\" .'])\n elif elem_text.lower() in personal_pronouns:\n # Array of tuples of matched text, type, mappings and IRIs\n new_tuples = _check_personal_pronouns(elem_text, last_nouns)\n nouns.extend(new_tuples)\n last_nouns.extend(new_tuples)\n continue # More than 1 new tuple, so handled specifically in this code block; No need to 'drop 
through'\n # Not a pronoun; Check for a match in instances of the ontology\n elif ('PERSON' in elem_type or elem_type.endswith('GPE') or\n elem_type.endswith('ORG') or elem_type.endswith('NORP')):\n if space in head_lemma:\n # Get last two words in the name (for given+surname or surname+given name, Eastern or Western ordering)\n names = head_lemma.split(space)\n possible_name = f'{names[-2]} {names[-1]}'\n match_iri, match_type = check_specific_match(head_lemma, elem_type)\n if not match_iri and possible_name:\n match_iri, match_type = check_specific_match(possible_name, elem_type)\n if match_iri:\n new_tuple = (elem_text, elem_type, match_type, match_iri)\n else:\n # Check for family role and match to a name\n new_tuple = _process_family_role(head_text, elem_text, elem_type, alet_dict)\n if not new_tuple:\n # No match - Try to match text and type in last_nouns\n match_noun_tuples = _check_last_nouns(elem_text, elem_type, last_nouns)\n if match_noun_tuples:\n new_tuple = (elem_text, elem_type, match_noun_tuples[0][0], match_noun_tuples[0][1])\n elif possible_name:\n # Also check given + surname\n match_noun_tuples = _check_last_nouns(possible_name, elem_type, last_nouns)\n if match_noun_tuples:\n new_tuple = (possible_name, elem_type, match_noun_tuples[0][0], match_noun_tuples[0][1])\n if not new_tuple:\n # No match - Try to match text and type in alet_dict\n match_maps, match_iri = _check_alet_dict(elem_text, elem_type, alet_dict, last_nouns) # Updates last nouns\n if match_iri:\n new_tuple = (elem_text, elem_type, match_maps, match_iri)\n elif possible_name:\n # Also check given + surname\n match_maps, match_iri = _check_alet_dict(possible_name, elem_type, alet_dict, last_nouns)\n if match_iri:\n new_tuple = (possible_name, elem_type, match_maps, match_iri)\n if not new_tuple:\n # No match - Check if the noun is aligned with an event that has already been described\n event_classes, event_iri = check_event(elem_text, last_events)\n if event_iri:\n new_tuple = (elem_text, elem_type, event_classes, event_iri)\n if not new_tuple:\n # No match - Create new entity\n iri = re.sub(r'[^:a-zA-Z0-9_]', underscore, f':{elem_text.lower()}_{str(uuid.uuid4())[:13]}').\\\n replace('__', '_')\n noun_mappings, noun_turtle = create_noun_ttl(iri, elem_text, elem_type, alet_dict, ext_sources)\n new_tuple = (elem_text, elem_type, noun_mappings, iri)\n turtle.extend(noun_turtle)\n nouns.append(new_tuple)\n last_nouns.append(new_tuple)\n return nouns", "def parse_rec(filename):\n tree = et.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n obj_struct['truncated'] = int(obj.find('truncated').text)\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,\n int(bbox.find('ymin').text) - 1,\n int(bbox.find('xmax').text) - 1,\n int(bbox.find('ymax').text) - 1]\n objects.append(obj_struct)\n\n return objects", "def _parse_tsv_vocab_file(self, vocab_file: str):\n with open(vocab_file, \"r\", encoding=\"utf-8\") as f:\n for (index, line) in enumerate(f):\n title, count = line.rstrip().split(\"\\t\")\n entity = Entity(title, None)\n self.vocab[entity] = index\n self.counter[entity] = int(count)\n self.inv_vocab[index] = [entity]", "def parse(string):\n doc = nlp(string)\n return [str(n) for n in doc.noun_chunks]", "def ReadMorphit(self):\r\n self.words = {}\r\n with codecs.open(self.MorphItFileName, 'r', 
'utf-8') as f:\r\n for line in f.readlines():\r\n line = line.split()\r\n try:\r\n# print (line)\r\n self.words[line[0]] = line[2][:3]\r\n# if line[2][:3] in self.verbTags:\r\n# line[2]=line[2].split(u'+')\r\n# line[2][0]=line[2][0][line[2][0].find(u':')+1:]\r\n except:\r\n pass\r\n return self.words", "def __init__(self, file):\n with open(file, 'r') as f:\n self.vocab = json.loads(f.read())", "def from_file(file_handle, delimiter=\",\"):\r\n # Reading in the data\r\n line = file_handle.readline()\r\n line = line.rstrip(\"\\n\")\r\n line = line.split(delimiter)\r\n\r\n # Assigning the data\r\n label = line[0]\r\n mass = float(line[1])\r\n position = np.array(line[2:5], float)\r\n velocity = np.array(line[5:], float)\r\n\r\n return Particle3D(label, mass, position, velocity)", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def parse(self, word):\n son_map = self._sonority_map(word)\n ons_son = self._initial_onset(son_map)\n cod_son = self._final_coda(son_map)\n ons = self.from_map(ons_son, word)\n cod = self.from_reverse_map(cod_son, word)\n return (ons, cod)", "def process_raw_phrases(file_path):", "def parse_taxonomy(infile):\r\n\r\n res = {}\r\n for line in infile:\r\n if not line or line.startswith('#'):\r\n continue\r\n line = line.rstrip(\"\\n\")\r\n fields = line.split('\\t')\r\n otu = fields[0].split(' ')[0]\r\n res[otu] = taxa_split(fields[1])\r\n\r\n return res", "def __init__(self, filename=None, label=None, tokens=None):\n if label: # specify from label/tokens, for testing.\n self.label = label\n self.tokens = tokens\n self.postID = -1\n self.likes = -1\n else: # specify from file.\n self.filename = filename\n parsedNames = filename.split(\"#\")\n if 'pop' in parsedNames[0]:\n self.label = 'pop'\n else:\n self.label = 'sod'\n self.postID = parsedNames[1]\n self.likes = parsedNames[2]\n self.tokenize()", "def locateObjNumberNoun(data, questionDict, questionIdict):\n how = questionDict['how']\n many = questionDict['many']\n for t in range(data.shape[0] - 2):\n if data[t, 0] == how and \\\n data[t + 1, 0] == many:\n for u in range(t + 2, data.shape[0]):\n word = questionIdict[data[u, 0] - 1]\n lexname = lookupLexname(word)\n if (lexname is not None and \\\n lexname.startswith('noun')) or \\\n (lexname is None):\n return data[u, 0]\n print 'not found'\n return data[-1, 0]", "def parse_syn(syntenic):\n syn_ages = {}\n with open(syntenic, 'r') as f:\n for index, line in enumerate(f):\n if index == 0:\n continue\n else:\n tmp = line.strip().split('\\t')\n # keep only the tandems\n b_genes = []\n p_genes = []\n if ',' in tmp[2]:\n b_genes += tmp[2].split(',')\n if ',' in tmp[3]:\n b_genes += tmp[3].split(',')\n if ',' in tmp[4]:\n p_genes += tmp[4].split(',')\n if ',' in tmp[5]:\n p_genes += tmp[5].split(',')\n b_ages = []\n p_ages = []\n if tmp[6] != 'NA':\n b_ages += tmp[6].split(',')\n if tmp[7] != 'NA':\n b_ages += tmp[7].split(',')\n if tmp[8] != 'NA':\n p_ages += tmp[8].split(',')\n if tmp[9] !='NA':\n p_ages += tmp[9].split(',')\n # If there are B73 tandem duplicates, then we iterate through\n # and build the dictionary\n if b_ages:\n ct = 1\n for age in b_ages:\n b1 = b_genes[ct-1]\n b2 = b_genes[ct]\n syn_ages[(b1, b2)] = age\n ct += 1\n if p_ages:\n ct = 1\n for age in p_ages:\n p1 = p_genes[ct-1]\n p2 = 
p_genes[ct]\n syn_ages[(p1, p2)] = age\n ct += 1\n return syn_ages", "def toLingDataToken(token):\n\n t = Token()\n\n t.set(\n id=token.i,\n word=token.orth_,\n lemma=token.lemma_,\n POS=token.tag_,\n SPOS=token.pos_,\n depID=token.dep,\n depStr=token.dep_,\n NE=token.ent_type_,\n foreign=token.is_oov\n )\n\n # setting features\n '''\n t.features = {}\n #print(t.POS)\n featureStr = translate(t.POS)\n # save string form of feature translation\n t.features['str'] = featureStr\n\n featureArr = featureStr.split(\"+\")\n #print(featureArr)\n # find the first feature\n i = 0\n while len(featureArr[i]) < 1:\n i += 1\n\n t.features['type'] = featureArr[i]\n if t.features['type'] in [\"N\"]:\n # look for number\n i += 1\n while i < len(featureArr):\n # this means it's probably a number declaration\n if len(featureArr[i]) < 4:\n t.features['number'] = featureArr[i]\n # and next feature could be type of noun\n if i + 1 < len(featureArr):\n t.features['isProper'] = featureArr[i + 1]\n break\n i += 1\n\n if t.features['type'] in [\"V\"]:\n # look for person and number\n i += 1\n while i < len(featureArr):\n # this means it's probably a person declaration\n if len(featureArr[i]) < 4:\n t.features['person'] = featureArr[i]\n # and next feature could be number\n if i + 1 < len(featureArr):\n t.features['number'] = featureArr[i + 1]\n break\n else:\n # probably a tense\n t.features['tense'] = featureArr[i]\n t.features['isParticiple'] = (\"Part\" in featureArr[i])\n\n i += 1\n #print(t.features)\n '''\n\n # setting wordType\n if token.tag_ == \"BES\": # copula\n t.set(wordType=4)\n elif token.pos_ == \"VERB\":\n t.set(wordType=1)\n elif token.pos_ == \"NOUN\" or token.pos_ == \"PROPN\":\n t.set(wordType=2)\n elif token.pos_ == \"PRON\":\n t.set(wordType=3)\n else:\n t.set(wordType=5)\n\n # spaCy does not have coreferencing...\n\n return t", "def read_data(self, filepath):\n with open(filepath, \"r\", encoding=\"utf8\") as input_data:\n ids, premises, hypotheses, premises_length, hypotheses_length, premise_polarities, hypothesis_polarities, labels = [], [], [], [], [], [], [], []\n\n for idx, line in enumerate(input_data):\n print(f\"Reading instance {idx}\", end='\\r')\n line = eval(line)\n hypothesis = eval(line['hypothesis'])\n premise = eval(line['premise'])\n label = line['label']\n\n # Align the nltk premise and the umls premise\n premise, premise_umls, premise_polarity = self.aligner(premise)\n hyp, hyp_umls, hypothesis_polarity = self.aligner(hypothesis)\n\n ids.append(idx)\n premises.append((premise, premise_umls))\n hypotheses.append((hyp, hyp_umls))\n labels.append(self.labeldict[label])\n premises_length.append(len(premise))\n hypotheses_length.append(len(hyp))\n premise_polarities.append(premise_polarity)\n hypothesis_polarities.append(hypothesis_polarity)\n\n return {\"ids\": ids,\n \"premises\": premises,\n \"hypotheses\": hypotheses,\n \"labels\": labels,\n \"premises_lengths\": premises_length,\n \"hypotheses_lengths\": hypotheses_length,\n \"premise_polarities\": premise_polarities,\n \"hypothesis_polarities\": hypothesis_polarities,\n \"max_premise_length\": max(premises_length),\n \"max_hypothesis_length\": max(hypotheses_length)}", "def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)", "def readObject(f):\n name = f.readline().rstrip()\n if name == \"\":\n name = f.readline().rstrip()\n if name == \"\":\n return None\n description = f.readline().rstrip()\n location = f.readline().rstrip()\n return AdvObject(name, description, location )", 
"def get_nouns(self):\n word_punct_token = WordPunctTokenizer().tokenize(self.sentence_string)\n\n clean_tokens = []\n for token in word_punct_token:\n token = token.lower()\n\n # remove any value that are not alphabetical\n new_token = re.sub(r\"[^a-zA-Z]+\", \"\", token)\n\n # remove empty value and single character value\n if new_token != \"\" and len(new_token) >= 2:\n vowels = len([v for v in new_token if v in \"aeiou\"])\n if vowels != 0: # remove line that only contains consonants\n clean_tokens.append(new_token)\n\n noun_types = [\"NN\", \"NNS\", \"NNP\", \"NNPS\", \"N\"]\n is_noun = lambda pos: pos in noun_types\n nouns = [word for (word, pos) in nltk.pos_tag(clean_tokens) if is_noun(pos)]\n\n if nouns:\n return nouns\n else:\n raise InvalidSentenceError(self.sentence_string)", "def load_cmudict():\n with open(\"text/en/cmudict-0.7b.txt\", encoding=\"ISO-8859-1\") as file_reader:\n cmudict = (line.strip().split(\" \") for line in islice(file_reader, 126, 133905))\n\n cmudict = {format_alt_entry(word): pronunciation for word, pronunciation in cmudict}\n\n return cmudict", "def _process_data_file(self):\n \n with open(self.data_file, 'r') as f:\n self.description = f.readline().strip()\n data = np.loadtxt(self.data_file, skiprows=1)\n\n return data", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'Gauge':\n self.read_metgauge(line, f)\n elif self.cleantag(line) == 'ObservationData':\n self.read_obsgauge(line, f)\n # Next line\n line = f.nexttag()", "def load_raw_annot(filename):\n with open(filename, 'r') as fp:\n data = json.loads(fp.read())\n\n mapping = _create_mapping()\n\n for k in data.keys():\n for i in xrange(len(data[k])):\n data[k][i] = eval_utils.revise_sentiment(data[k][i], mapping)\n return data", "def read(self):\n try:\n f = open(self.datfile, 'r')\n except:\n print('ERROR: data file not found!')\n exit()\n\n # Get rid of the header\n for _ in range(self.header_length):\n f.readline()\n\n # Read nuclide mass data\n for line in f:\n ls = line.strip()\n n, z, ebind = ls.split()\n nuclide = BindingNuclide(n, z, ebind)\n self.nuclides.append(nuclide)\n\n f.close()", "def read_file(self, filename, per_atom=True):\n if isinstance(filename, str):\n fileobj = open(filename)\n elif isinstance(filename, file):\n fileobj = filename\n elif isinstance(filename, type(StringIO())):\n fileobj = filename\n fileobj.name = None\n thermodata = fileobj.readlines()\n headers = [h.lower() for h in thermodata.pop(0).strip().split()]\n if \"composition\" not in headers:\n raise PhaseDataError(\n \"Found columns: %s. Must provide composition in\\\n a column labelled composition.\"\n % (\", \".join(headers))\n )\n if \"energy\" not in headers and \"delta_e\" not in headers:\n raise PhaseDataError(\n \"Found columns: %s. 
Must provide energies in\\\n a column labelled delta_e or energy.\"\n % (\", \".join(headers))\n )\n\n keywords = {\n \"energy\": \"energy\",\n \"composition\": \"composition\",\n \"delta_e\": \"energy\",\n \"delta_h\": \"energy\",\n \"delta_g\": \"energy\",\n \"comp\": \"composition\",\n \"name\": \"composition\",\n \"desc\": \"description\",\n \"description\": \"description\",\n }\n\n headers = [keywords[h] for h in headers if h in keywords]\n\n name = filename.split(\"/\")[-1]\n\n for i, line in enumerate(thermodata):\n line = line.strip().split()\n if not line:\n continue\n ddict = dict(list(zip(headers, line)))\n phase = Phase(\n composition=ddict[\"composition\"],\n energy=float(ddict[\"energy\"]),\n description=ddict.get(\n \"description\", \"{file}:{line}\".format(file=name, line=i)\n ),\n per_atom=per_atom,\n )\n self.add_phase(phase)", "def parser(filename):\n\n regex = re.compile(\n # prolog\n r\"run(?P<run>\\w+)\"\n ##r\"\\-(?P<code_name>((mfdn)|(obscalc-ob))[^\\-]*)\"\n r\"\\-(?P<descriptor>\"\n # descriptor contents\n r\"Z(?P<Z>\\d+)\\-N(?P<N>\\d+)\"\n r\"\\-(?P<interaction>.+)\\-(?P<coulomb>\\d)\"\n r\"\\-(?P<truncation_descriptor>.+)\"\n ## r\"\\-Nmax(?P<Nmax>\\d+)\"\n # epilog\n r\").res\"\n )\n\n conversions = {\n \"Z\" : int,\n \"N\" : int,\n \"interaction\" : str,\n \"coulomb\" : int,\n }\n\n match = regex.match(filename)\n if (match == None):\n raise ValueError(\"bad form for spncci results filename: \" + filename)\n info = match.groupdict()\n\n # convert fields\n for key in conversions:\n conversion = conversions[key]\n info[key] = conversion(info[key]) if (info[key] is not None) else None\n\n return info", "def NOAD_to_wordnet(data):\r\n NOAD_to_wordnet = {}\r\n with open(algorithmic_map, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n noad, wordnet = line.split()\r\n NOAD_to_wordnet[noad] = wordnet\r\n with open(manual_map, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n noad, wordnet = line.split()\r\n NOAD_to_wordnet[noad] = wordnet\r\n \r\n count = 0\r\n for elem in data: \r\n if elem[\"is_target\"]:\r\n if elem[\"sense\"] not in NOAD_to_wordnet:\r\n count += 1\r\n continue\r\n noad_sense = elem[\"sense\"]\r\n elem[\"sense\"] = NOAD_to_wordnet[noad_sense]\r\n print(\"NOAD sense not in mapping text: %d\" %count)\r\n return data", "def read_po(self, inputfile):\n is_index = False\n lines = inputfile.readlines()\n index = ''\n value = ''\n for line in lines:\n if line.startswith('#'):\n continue\n elif line.startswith('msgid'):\n is_index = True\n self.translations[index] = value\n index = ''\n value = ''\n elif line.startswith('msgstr'):\n is_index = False\n\n v = re.match('.*\"(.*)\".*', line)\n if v:\n if is_index:\n index += ''.join(v.groups())\n else:\n value += ''.join(v.groups())", "def make_from_file(filehandle):\n lines = filehandle.readlines()\n label = str(lines[0].rstrip('\\n'))\n mass = float(lines[1].rstrip('\\n'))\n position = list(lines[2].rstrip('\\n').split(','))\n velocity = list(lines[3].rstrip('\\n').split(','))\n particle = Particle3D(label=label, mass=mass, position=position, velocity=velocity)\n filehandle.close()\n return particle", "def get_compound_noun( ngram ):\n try:\n pattern = re.compile( '((?: ?\\\\b[^\\\\s]+(?:/NN.?/[a-z]+/[\\\\d]+)){2,})' )\n match = re.search( pattern, ngram )\n if match:\n compound = ''\n contains_root = False\n tokens = match.group().strip().split(' ')\n for t in tokens:\n # embed()\n items = t.split('/')\n compound += ( items[0] + ' ' )\n if items[3] == 0:\n contains_root = 
True\n if contains_root:\n return compound\n else:\n return None\n else:\n return None\n \n except ValueError:\n return None", "def is_noun(tag):\r\n return tag in ['NN', 'NNS', 'NNP', 'NNPS']", "def __init__(self, infile):\n txt = infile.read()\n for block in self.splitter.split(txt):\n block = block.strip()\n if block:\n term = block.splitlines()[0].strip().decode('utf8')\n defn = \"\\n\".join(line.strip() for line in block.splitlines()[1:])\n self[term] = defn.decode('utf8')", "def read_data(self, filePath):\n with open(filePath, 'r', encoding='iso-8859-1') as f:\n for sentence in f.readlines():\n sentence = sentence.replace('\\n', '')\\\n .replace('\"', '')\\\n .replace('\\'', '')\\\n .replace('.', '')\\\n .replace(',', '')\\\n .replace('[', '')\\\n .replace(']', '')\\\n .replace('(', '')\\\n .replace(')', '')\\\n .replace(':', '')\\\n .replace('--', '')\\\n .replace('-', '')\\\n .replace('\\\\', '')\\\n .replace('0', '')\\\n .replace('1', '')\\\n .replace('2', '')\\\n .replace('3', '')\\\n .replace('4', '')\\\n .replace('5', '')\\\n .replace('6', '')\\\n .replace('7', '')\\\n .replace('8', '')\\\n .replace('9', '')\\\n .replace('`', '')\\\n .replace('=', '')\\\n .replace('$', '')\\\n .replace('/', '')\\\n .replace('*', '')\\\n .replace(';', '')\\\n .replace('<b>', '')\\\n .replace('%', '')\n sentence = sentence.split(' ')\n sentence = list(filter(lambda x: x, sentence))\n if sentence:\n self.word_num += len(sentence)\n self.maxlen = self.maxlen if self.maxlen >= len(\n sentence) else len(sentence)\n self.minlen = self.minlen if self.minlen <= len(\n sentence) else len(sentence)\n if 'pos' in filePath:\n self.Pos.append([sentence, self.feelMap['pos']])\n else:\n self.Neg.append([sentence, self.feelMap['neg']])", "def load(filename):\n\n print \"Loading dictionary...\"\n dictionary = Dictionary()\n print \" Loading file...\"\n whole_file = file(filename).read().upper()\n print \" Splitting file...\"\n words = whole_file.split()\n print \" Removing unsuitable words...\"\n words = dictionary.remove_unsuitable_words(words)\n print \" Building data structures...\"\n dictionary.set_words(words)\n\n print \" Loaded %d words\" % len(dictionary.words)\n print \" Unique letter size:\"\n print \" No blanks: %d\" % len(dictionary.letters_map)\n print \" One blank: %d\" % len(dictionary.letters_map_one_blank)\n print \" Two blanks: %d\" % len(dictionary.letters_map_two_blanks)\n\n return dictionary", "def load_subj(loc='./data/'):\n pos, neg = [], []\n with open(os.path.join(loc, 'SUBJ', 'subj.objective'), 'rb') as f:\n for line in f:\n pos.append(line.decode('latin-1').strip())\n with open(os.path.join(loc, 'SUBJ', 'subj.subjective'), 'rb') as f:\n for line in f:\n neg.append(line.decode('latin-1').strip())\n return pos, neg", "def __init__(self, file_name: str):\n self._file_name = file_name\n self._afinn = {}\n self._afinn_phrase = []\n self._reg_affin_phrase_str = \"\\\\s|[!,.\\'\\\"]\"\n # read the file AFFIN and map words to score\n with open(self._file_name, \"r\") as f:\n for str in f.readlines():\n entry = str.split()\n if (len(entry) > 2):\n length = len(entry)\n words = entry[0]\n for i in range(1, length - 1):\n words = words + ' ' + entry[i]\n self._reg_affin_phrase_str += \"|\"+words\n self._afinn_phrase.append(words)\n self._afinn[words] = int(entry[length - 1])\n else:\n self._afinn[entry[0]] = int(entry[1])", "def read_data(filename, eos='#'):\n ### Exercise 6.1\n\n with open(filename) as f:\n utterances = []\n labels = []\n\n for line in f:\n # Get utterance output and length\n 
utter = line\n utter = utter.replace(\" \", \"\").replace(\"\\n\", \"\") + \"#\"\n utterances.append(utter)\n # Make empty sequence\n sequence = np.zeros(len(utter), dtype=int)\n sequence[0], sequence[len(utter) - 1] = 1, 1\n # Find indexes of beginning of words\n prev_char = \"\"\n count = 0\n new_word_indexs = []\n for char in line:\n if char == \" \":\n prev_char = char\n continue\n if prev_char == \" \":\n prev_char = char\n new_word_indexs.append(count)\n count += 1\n else:\n prev_char = char\n count += 1\n for index in new_word_indexs:\n sequence[index] = 1\n labels.append(sequence)\n\n return (utterances, labels)", "def load_onto(filename=onto_filename):\n onto_dict = parse_yaml(filename)\n onto = CrosslinksOnto()\n for id, bond_dict in onto_dict.items():\n bond = Bond()\n onto[id] = bond\n\n bond.id = id\n bond.name = bond_dict.get('name', None)\n bond.synonyms = bond_dict.get('synonyms', []) \n\n l_alph = get_alphabet(bond_dict['l_monomer_alphabet'])\n r_alph = get_alphabet(bond_dict['r_monomer_alphabet'])\n bond.l_monomer = l_alph.monomers.get(bond_dict['l_monomer'])\n bond.r_monomer = r_alph.monomers.get(bond_dict['r_monomer'])\n\n for atom_type in ['l_bond_atoms', 'r_bond_atoms', 'l_displaced_atoms', 'r_displaced_atoms']:\n for atom in bond_dict[atom_type]:\n element, position, charge = parse_atom(atom)\n getattr(bond, atom_type).append(Atom(Monomer, element, position=position, charge=charge))\n\n bond.order = BondOrder[bond_dict.get('order', 'single')]\n stereo = bond_dict.get('stereo', None)\n if stereo is None:\n bond.stereo = None\n else:\n bond.stereo = BondStereo[stereo]\n\n bond.comments = bond_dict.get('comments', None)\n\n crosslink_to_id = {xlink: id for id, xlink in onto.items()}\n\n return onto, crosslink_to_id", "def parse_domain(self, domainfile):\n\n with open(domainfile) as dfile:\n dfile_array = self._get_file_as_array(dfile)\n #Deal with front/end define, problem, :domain\n if dfile_array[0:4] != ['(', 'define', '(', 'domain']:\n print('PARSING ERROR: Expected (define (domain ... 
at start of domain file')\n sys.exit()\n self.domain = dfile_array[4]\n\n dfile_array = dfile_array[6:-1]\n opencounter = 0\n keyword = ''\n obj_list = []\n is_obj_list = True\n for word in dfile_array:\n if word == '(':\n opencounter += 1\n elif word == ')':\n opencounter -= 1\n elif word.startswith(':'):\n if word[1:] not in DFILE_KEYWORDS:\n pass\n elif keyword != 'requirements':\n keyword = word[1:]\n if opencounter == 0:\n if keyword == 'action':\n self.actions.append(obj_list)\n obj_list = []\n if keyword == 'types':\n for element in obj_list:\n self.types.setdefault('object', []).append(element)\n self.type_list.add('object')\n self.type_list.add(element)\n obj_list = []\n keyword = ''\n\n if keyword == 'requirements': #Requirements list\n if word != ':requirements':\n if not word.startswith(':'):\n print('PARSING ERROR: Expected requirement to start with :')\n sys.exit()\n elif word[1:] not in DFILE_REQ_KEYWORDS:\n print('WARNING: Unknown Rquierement ' + word[1:])\n #print 'Requirements must only be: ' + str(DFILE_REQ_KEYWORDS)\n #sys.exit()\n else:\n self.requirements.add(word[1:])\n elif keyword == 'action':\n obj_list.append(word)\n elif not word.startswith(':'):\n if keyword == 'types': #Typed list of objects\n if is_obj_list:\n if word == '-':\n is_obj_list = False\n else:\n obj_list.append(word)\n else:\n #word is type\n for element in obj_list:\n if not word in self.type_list:\n self.types.setdefault('object', []).append(word)\n self.type_list.add(word)\n self.types.setdefault(word, []).append(element)\n self.type_list.add(element)\n self.type_list.add(word)\n is_obj_list = True\n obj_list = []\n elif keyword == 'constants': #Typed list of objects\n if is_obj_list:\n if word == '-':\n is_obj_list = False\n else:\n obj_list.append(word)\n else:\n #word is type\n for element in obj_list:\n if word in self.type_list:\n self.constants.setdefault(word, []).append(element)\n #self.object_list.add(element)\n else:\n print(self.type_list)\n print(\"ERROR unknown type \" + word)\n sys.exit()\n is_obj_list = True\n obj_list = []\n elif keyword == 'predicates' or keyword == 'private': #Internally typed predicates\n if word == ')':\n if keyword == 'private':\n #print \"...skip agent: \" + str(obj_list[:3])\n obj_list = obj_list[3:]\n keyword = 'predicates'\n if len(obj_list) == 0:\n #print \"...skip )\"\n continue\n p_name = obj_list[0]\n #print \"parse predicate: \" + p_name + \" \" + str(obj_list)\n pred_list = self._parse_name_type_pairs(obj_list[1:],self.type_list)\n self.predicates.append(Predicate(p_name, pred_list, True, False))\n obj_list = []\n elif word != '(':\n obj_list.append(word)\n elif keyword == 'functions': #functions\n if word == ')':\n p_name = obj_list[0]\n if obj_list[0] == '-':\n obj_list = obj_list[2:]\n #print \"function: \" + word + \" - \" + str(obj_list)\n self.functions.append(Function(obj_list))\n obj_list = []\n elif word != '(':\n obj_list.append(word)\n\n #Work on the actions\n new_actions = []\n for action in self.actions:\n if action[0] == '-':\n action = action[2:]\n act_name = action[1]\n act = {}\n action = action[2:]\n keyword = ''\n for word in action:\n if word.startswith(':'):\n keyword = word[1:]\n else:\n act.setdefault(keyword, []).append(word)\n self.agent_types.add(act.get('agent')[2])\n agent = self._parse_name_type_pairs(act.get('agent'),self.type_list)\n param_list = agent + self._parse_name_type_pairs(act.get('parameters')[1:-1],self.type_list)\n up_params = Predicate('', param_list, True, False)\n pre_list = 
self._parse_unground_propositions(act.get('precondition'))\n eff_list = self._parse_unground_propositions(act.get('effect'))\n new_act = Action(act_name, up_params, pre_list, eff_list)\n\n new_actions.append(new_act)\n self.actions = new_actions", "def extract_nouns(tagged_abstracts_list, def_tags_per_abs = 0.3):\n\n noun_counter = []\n per_abstract_counts = []\n per_abstract_counts_list = []\n normalized_all_counts = {}\n per_abstract_tag_counts = []\n\n for tags in tagged_abstracts_list:\n\n for tag in tags:\n\n if tag[1] == \"NN\" or tag[1] == \"NNS\" or tag[1] == \"NNP\" or tag[1] == \"NNPS\":\n\n per_abstract_tag_counts.append(str(tag[0].encode('ascii', 'ignore')))\n\n noun_counter.append(str(tag[0].encode('ascii', 'ignore')))\n\n per_abstract_dict = dict(Counter(per_abstract_tag_counts))\n per_abstract_counts_list.append(per_abstract_dict)\n\n all_counts = dict(Counter(noun_counter))\n\n num_abstracts = float(len(tagged_abstracts_list))\n\n for key in all_counts.keys():\n\n if key in all_counts:\n\n total_occurrences = float(all_counts[key])\n else:\n\n total_occurrences = 0\n\n for abstract_ in per_abstract_counts_list:\n #print abstract_\n\n if key in abstract_ and key in all_counts:\n\n single_abstract_count = float(abstract_[key])\n\n if (single_abstract_count/total_occurrences) < def_tags_per_abs:\n normalized_all_counts[key] = float(all_counts[key])/num_abstracts\n\n return (normalized_all_counts)", "def process_file(self, data, filename):\n\n for relation in data:\n if \"start_date\" in relation[1]:\n start = convert_to_date(relation[1][\"start_date\"])\n end = convert_to_date(relation[1][\"end_date\"])\n\n if start <= START_DATE and end >= START_DATE:\n first = relation[1][\"first\"]\n second = relation[1][\"second\"]\n if first not in self.relations:\n self.add_tag(first)\n if second not in self.relations:\n self.add_tag(second)\n\n if relation[0] == \"vassal\": \n self.relations[first][\"vassal\"].append(second)\n self.relations[second][\"overlord\"].append(first)\n elif relation[0] == \"alliance\":\n self.relations[first][\"alliance\"].append(second)\n self.relations[second][\"alliance\"].append(first)\n elif relation[0] == \"dependency\":\n self.relations[first][\"tributary\"].append(second)\n self.relations[second][\"hegemon\"].append(first)\n elif relation[0] == \"guarantee\":\n self.relations[first][\"guaranteeing\"].append(second)\n self.relations[second][\"guarantor\"].append(first)\n elif relation[0] == \"union\":\n self.relations[first][\"junior\"].append(second)\n self.relations[second][\"senior\"].append(first)\n elif relation[0] == \"royal_marriage\":\n self.relations[first][\"marriage\"].append(second)\n self.relations[second][\"marriage\"].append(first)", "def load_data_and_labels(data_file):\n # Load data from files\n obj = open(data_file, \"r\")\n y, x_text, query= [],[],[]\n for ele in obj:\n ele = ele.strip().split(\"\\t\")\n if len(ele) !=5 or ele[0].strip() not in [\"1\", \"-1\"]:\n #print ele\n continue\n if (ele[0].strip() == \"1\"):\n y.append([0])\n else:\n y.append([1])\n\n query_text = ele[1].strip().decode(\"utf8\")\n doc_text = ele[2].strip().decode(\"utf8\")\n x_text.append( \" \".join( jieba.cut(doc_text) ) )\n query.append( \" \".join( jieba.cut(query_text) ) )\n return [x_text, np.array(y), np.array(query)]", "def __init__(self, txt_path, in_vocab_path, out_vocab_path):\n self.txt_seqs = open(txt_path, encoding='utf8', errors='ignore').readlines()\n self.word2id = utils.load_vocab(in_vocab_path,\n extra_word_list=[\"<UNK>\", \"<END>\"])\n 
self.punc2id = utils.load_vocab(out_vocab_path,\n extra_word_list=[\" \"])\n self.class2punc = { k : v for (v, k) in self.punc2id.items()}", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'SubBasins':\n self.read_subbasins(f)\n elif self.cleantag(line) == 'HRUs':\n self.read_HRUs(f)\n # Next line\n line = f.nexttag()", "def _read_lexicon(a_dname):\n if not a_dname:\n return\n elif a_dname[-1] == '/':\n a_dname = os.path.dirname(a_dname)\n basename = os.path.basename(a_dname)\n if basename == HSAN:\n return _read_hsan(a_dname)\n elif basename == S140:\n return _read_s140(a_dname)\n elif basename == SUBJCL:\n return _read_subjcl(a_dname)\n elif basename == NRC_HSHTAG:\n return _read_nrc_hshtag(a_dname)\n else:\n raise Exception(\"Unknown dictionary format: '{:s}'\".format(basename))", "def read_bnf_file(self, file_name):\n # <.+?> Non greedy match of anything between brackets\n non_terminal_pattern = \"(<.+?>)\"\n rule_separator = \"::=\"\n production_separator = \"|\"\n\n # Read the grammar file\n for line in open(file_name, 'r'):\n if not line.startswith(\"#\") and line.strip() != \"\":\n # Split rules. Everything must be on one line\n if line.find(rule_separator):\n lhs, productions = line.split(rule_separator)\n lhs = lhs.strip()\n if not re.search(non_terminal_pattern, lhs):\n raise ValueError(\"lhs is not a NT:\", lhs)\n self.non_terminals.add(lhs)\n if self.start_rule == None:\n self.start_rule = (lhs, self.NT)\n # Find terminals\n tmp_productions = []\n for production in [production.strip()\n for production in\n productions.split(production_separator)]:\n tmp_production = []\n if not re.search(non_terminal_pattern, production):\n self.terminals.add(production)\n tmp_production.append((production, self.T))\n else:\n # Match non terminal or terminal pattern\n # TODO does this handle quoted NT symbols?\n for value in re.findall(\"<.+?>|[^<>]*\", production):\n if value != '':\n if not re.search(non_terminal_pattern,\n value):\n symbol = (value, self.T)\n self.terminals.add(value)\n else:\n symbol = (value, self.NT)\n tmp_production.append(symbol)\n tmp_productions.append(tmp_production)\n # Create a rule\n if not lhs in self.rules:\n self.rules[lhs] = tmp_productions\n else:\n raise ValueError(\"lhs should be unique\", lhs)\n else:\n raise ValueError(\"Each rule must be on one line\")", "def read_processed_data_from_file(file, encoding='latin1'):\n\n with open(file, encoding=encoding) as f:\n raw = f.read()\n\n lines = raw.split('\\n')\n labeled_texts = []\n n = len(lines) - 1\n for i, line in enumerate(lines):\n print(f'\\rLoading review {i} of {n}', end='')\n if line == '':\n continue\n tagged_words = re.findall(r'(.+?\\\\.+?) 
', line)\n label = re.findall(r'#(\\d+.\\d)#', line)[0]\n labeled_texts.append((tagged_words, label))\n print()\n return labeled_texts", "def parse_text(filehandle: TextIO) -> Iterator[Fasta]:\n\n # Check that the file looks like UniProt text format\n first_line = next(filehandle)\n if not first_line.startswith(\"ID\"):\n raise TextParserError(\n \"Unexpected file format: first line of UniProt text file should start with 'ID'\"\n )\n filehandle.seek(0)\n\n fasta = Fasta(sequence=\"\")\n for line in filehandle:\n key = line[:2] # This is more efficient than using line.startswith\n if key == \"ID\":\n tokens = line.split()\n fasta.entry_name = tokens[1]\n fasta.reviewed = True if tokens[2] == \"Reviewed;\" else False\n elif key == \"AC\":\n if fasta.accession is None:\n accessions = line[5:].rstrip(\";\\n\").split(\"; \")\n fasta.accession = accessions[0]\n elif key == \"DT\":\n if \"sequence version\" in line:\n tokens = line[5:].strip(\".\\n\").split()\n fasta.version = int(tokens[3])\n elif key == \"DE\":\n if \"RecName\" in line:\n fasta.name = _extract_name(line)\n # Get the first SubName if no RecName found\n elif fasta.name is None and line[5:12] == \"SubName\":\n fasta.name = _extract_name(line)\n elif line[5:10] == \"Flags\" and \"Fragment\" in line:\n fasta.fragment = True\n elif key == \"GN\":\n if line[5:10] == \"Name=\":\n tokens = line[10:].split(\";\")\n # Remove evidence tags, if present\n gene_tokens = tokens[0].split(\" {\")\n fasta.gene = gene_tokens[0]\n elif key == \"OS\":\n # TODO: check for multiline species name (excluding brackets)\n if fasta.species is None:\n species_line = line[5:].strip().split(\" (\")\n fasta.species = species_line[0].strip(\".\")\n elif key == \"OX\":\n if \"NCBI_TaxID\" in line:\n tokens = line[5:].strip(\";\\n\").split(\"; \")\n # Remove evidence tag if present\n taxid_tokens = tokens[0][11:].split(\" {\")\n fasta.taxid = taxid_tokens[0]\n elif key == \"PE\":\n fasta.evidence = int(line[5])\n elif key == \" \":\n sequence_line = line.strip().replace(\" \", \"\")\n fasta.sequence += sequence_line\n elif key == \"//\":\n yield fasta\n fasta = Fasta(sequence=\"\")", "def read_dictionary_from_file(self, stem_flag):\n file_name = \"/dictionary.txt\" if not stem_flag else \"/dictionaryWithStemming.txt\"\n with open(self.posting_and_dictionary_path + file_name, \"r\") as f:\n txt = f.readlines()\n for line in txt:\n l = line.split(\":\")\n pos = l[1].split(\",\")\n e = DictionaryElement(pos[0])\n e.pointer = int(pos[1])\n e.corpus_tf = int(pos[2])\n if not stem_flag:\n self.term_dictionary[l[0]] = e\n else:\n self.term_dictionary_with_stemming[l[0]] = e\n f.close()", "def parseLabels(filename):\n r = parse(filename)\n res = {}\n for qid in r:\n lst = []\n for y in r[qid].split(\";\"):\n doc, score = y.split(\",\")\n lst.append((int(doc), float(score)))\n res[qid] = lst\n return res", "def read(self, filename, species=None, poscar=None, sort=True):\n self.tags = dict()\n try:\n file = open(filename,'r')\n except:\n raise IncarError(\"Could not open file: '\" + filename + \"'\")\n\n # parse INCAR into self.tags\n for line in file:\n line = re.split('=',re.split('#',line)[0])\n if len(line) == 2:\n self.tags[line[0].strip()] = line[1].strip()\n self._verify_tags()\n self._make_natural_type()\n\n if species != None:\n self.update(species, poscar, sort)\n\n file.close()", "def parse_game_data(file_name : str) -> List[Topic]:\n # Opens file.\n data = open(file_name, 'r')\n\n # Sets up variables.\n current_topic = None\n parsed_data = []\n\n # Loops over 
each line in the file.\n for line in data:\n \n # Creates new topic if there is none.\n if current_topic is None:\n current_topic = Topic(line[:-1])\n\n # Saves topic if encountered empty line.\n elif line == '\\n':\n parsed_data.append(current_topic)\n current_topic = None\n\n # Parses question line and adds to topic.\n else:\n question, answer, points = parse_line(line)\n new_question = Question(question, answer, points)\n current_topic.questions.append(new_question)\n\n return parsed_data", "def parse_nonsyn(nonsyn):\n nonsyn_ages = {}\n with open(nonsyn, 'r') as f:\n for index, line in enumerate(f):\n if index == 0:\n continue\n else:\n tmp = line.strip().split('\\t')\n b_genes = []\n p_genes = []\n if ',' in tmp[1]:\n b_genes += tmp[1].split(',')\n if ',' in tmp[2]:\n p_genes += tmp[2].split(',')\n b_ages = []\n p_ages = []\n if tmp[3] != 'NA':\n b_ages += tmp[3].split(',')\n if tmp[4] != 'NA':\n p_ages += tmp[4].split(',')\n if b_ages:\n ct = 1\n for age in b_ages:\n b1 = b_genes[ct-1]\n b2 = b_genes[ct]\n nonsyn_ages[(b1, b2)] = age\n ct += 1\n if p_ages:\n ct = 1\n for age in p_ages:\n p1 = p_genes[ct-1]\n p2 = p_genes[ct]\n nonsyn_ages[(p1, p2)] = age\n ct += 1\n return nonsyn_ages", "def parse_file(\n self, filename: Path, encoding: Optional[str] = None, debug: bool = False\n ) -> NL:\n with open(filename, encoding=encoding) as stream:\n return self.parse_stream(stream, debug)", "def from_psana_file(cls, filename):\n ret = translate.load_psana(cls, filename)\n ret._sort_tree()\n return ret", "def extract_data(file_ner,file_pos,separator=\" \"):\n\n # read NER and POS from the two files\n words_tags=read_conll_file(file_ner)\n words_pos=read_conll_file(file_pos)\n \n ## some checks, e.g., that both files have same length, same tokens\n assert(len(words_tags)==len(words_pos))\n \n for (words,tags),(_,pos) in zip(words_tags,words_pos):\n for word,pos,tag in zip(words,pos,tags):\n # first letter is capitalized\n cap=\"+\" if word[0].isupper() else \"-\"\n hyphen = '+' if '-' in word else '-'\n l = str(len(word))\n #vowels = \"\".join(sorted([w for w in word.lower() if w in ['a','e','i','o','u','y']]))\n #################################\n ###### YOUR FEATURES HERE ####### \n #################################\n # 0=separator\n \n ## todo: output the cap feature and more \n ## make sure the format you output here is what the nerfeats.py script expects as fields!\n print separator.join([word.lower(),pos,cap, l, hyphen, tag])\n # sentence separator\n print \"\"" ]
[ "0.5926569", "0.56615496", "0.5575726", "0.5492108", "0.54724497", "0.54638004", "0.5449956", "0.5432941", "0.5421955", "0.53711075", "0.53692234", "0.535761", "0.53158194", "0.5268704", "0.52373093", "0.5237189", "0.5234773", "0.5220252", "0.52158135", "0.5205877", "0.52025634", "0.5196424", "0.51920056", "0.51561964", "0.51454395", "0.5133324", "0.5114785", "0.511348", "0.5100287", "0.5081667", "0.5076924", "0.50706595", "0.50689846", "0.5049998", "0.5047136", "0.50470495", "0.5041752", "0.5037247", "0.50327754", "0.5028755", "0.50259846", "0.50230914", "0.50159854", "0.49958083", "0.49870625", "0.49765825", "0.49618733", "0.49589851", "0.49481818", "0.4941247", "0.49389294", "0.49353433", "0.4934022", "0.49314773", "0.4927532", "0.49209815", "0.49182317", "0.49180028", "0.49105716", "0.49091825", "0.4905054", "0.49043468", "0.49008638", "0.48914766", "0.48857662", "0.48852444", "0.4883498", "0.4878825", "0.48639783", "0.48600167", "0.48586133", "0.48578298", "0.48566645", "0.48505566", "0.48480323", "0.48450273", "0.48387247", "0.48349926", "0.4833491", "0.4827303", "0.4821407", "0.48071674", "0.48009306", "0.4800658", "0.48004252", "0.48000848", "0.47996783", "0.47940934", "0.47876686", "0.47848424", "0.4780564", "0.47760433", "0.47759178", "0.47758463", "0.47720712", "0.4759228", "0.47574964", "0.475458", "0.47463918", "0.47450367" ]
0.69166636
0
Returns the self.guessed_by and self.metaphors_used data as a readable string.
def get_str_metadata(self):
    return "\n".join(["Guessed by {}".format(self.guessed_by),
                      "{} metaphors used".format(self.metaphors_used)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_strings(self):\n return self._guessed_strings", "def __str__(self):\n d = {}\n d[\"tuner_number\"] = self.tuner_number\n d[\"output_format\"] = self.output_format\n d[\"output_source\"] = self.output_source\n return str(d)", "def get_human_readable(self):\n\n def yesno(key):\n if getattr(self, key) and getattr(self, key) > 0:\n return \"Y\"\n else:\n return \"N\"\n\n keys = (\n \"pvs1\",\n \"ps1\",\n \"ps2\",\n \"ps3\",\n \"ps4\",\n \"pm1\",\n \"pm2\",\n \"pm3\",\n \"pm4\",\n \"pm5\",\n \"pm6\",\n \"pp1\",\n \"pp2\",\n \"pp3\",\n \"pp4\",\n \"pp5\",\n \"ba1\",\n \"bs1\",\n \"bs2\",\n \"bs3\",\n \"bs4\",\n \"bp1\",\n \"bp2\",\n \"bp3\",\n \"bp4\",\n \"bp5\",\n \"bp6\",\n \"bp7\",\n )\n result = \", \".join([\"%s: %s\" % (key.upper(), yesno(key)) for key in keys])\n result += \", ACMG classification: %s\" % self.class_auto\n if self.class_override:\n result += \", ACMG class. override: %s\" % self.class_override\n return result", "def to_formatted(self) -> str:\n return str(self.google_confidence) + \\\n \"\\t\" + str(self.normalized_sentence_score) + \\\n \"\\t\" + str(self.gaps_transcript) + \\\n \"\\t\" + str(self.gaps_google)", "def __str__(self):\n result = \", \".join(map(str, self.hand))\n result += \"\\n \" + str(self.get_score()) + \" points\"\n return result", "def details(self) -> str:\n return f\"- **language**: [{self.language}]\\n\" \\\n f\"- **opengame**: [{self.opengame}]\\n\" \\\n f\"- **system**: [{self.system}]\\n\" \\\n f\"- **mode**: [{self.mode}]\\n\" \\\n f\"- **attributes**: [{self.attributes}]\\n \" \\\n f\"- **score_threshold**: [{self.score_threshold}]\\n \" \\\n f\"- **monsters**: [{self.monsters}]\\n\"", "def __repr__(self):\n return \"{} hp:{:.1f} stunned: {} potions: {}\".format(self.name, self.hitpoints,\n self.stunned, self.potions)", "def __str__(self):\r\n to_string = \"ID: \" + str(self.dat_id) + \" --- CLASSIFICATION: \" + str(self.dat_party) + \" --- VOTED: \" + str(self.dat_votes)\r\n return to_string", "def __str__(self):\n s = 'hit '+str(self.hit)+'\\n'\n s+= 'states '+str(self.states)+'\\n'\n s+= 'chi2 '+str(self.chi2)\n return s", "def __str__(self):\n header = [\n ' GnoweeHeuristics:']\n header += [('Population = {}').format(self.population)]\n header += [('Sampling Method = {}').format(self.initSampling)]\n header += [('Discovery Fraction = {}').format(self.fracMutation)]\n header += [('Elitism Fraction = {}').format(self.fracElite)]\n header += [('Levy Fraction = {}').format(self.fracLevy)]\n header += [('Levy Alpha = {}').format(self.alpha)]\n header += [('Levy Gamma = {}').format(self.gamma)]\n header += [('Levy Independent Samples = {}').format(self.n)]\n header += [('Levy Scaling Parameter = {}').format(self.scalingFactor)]\n header += [('Constraint Violaition Penalty = {}').format(self.penalty)]\n header += [('Max # of Generations = {}').format(self.maxGens)]\n header += [('Max # of Function Evaluations = {}').format(self.maxFevals)]\n header += [('Convergence Tolerance = {}').format(self.convTol)]\n header += [('Stall Limit = {}').format(self.stallLimit)]\n header += [('Optimal Convergence Tolerance = {}').format(self.optConvTol)]\n header += [' Attributes Inhereted from ProblemParameters:']\n header += [('{}').format(ProblemParameters.__str__(self))]\n return ('\\n').join(header) + '\\n'", "def __str__(self):\n out = \"!!!!!!! 
REPORTED STATISTICS !!!!!!!\\n\"\n for k in self.order:\n if k in self.keys():\n if k in self.explainer.keys():\n out += self.explainer[k].replace('XXXX', str(\n self[k])) + \"\\n\"\n else:\n out += self[k] + \"\\n\"\n for k in self.keys():\n if k not in self.order:\n out += str(self[k])\n return out", "def __str__(self):\n return (str(self.chromosome_id) + '. Chromosome: Genes: ' + str(\n self.genes) + '; Fitness: ' + str(self.fitness_value))", "def human_readable_info(self) -> str:\n next_session = unix_str(self._stat.next_session)\n last_session = unix_str(self._stat.last_session)\n return \"\"\"\n Next Session: {}\n Last Session: {}\n Repetitions: {}\n Health: {}\n ------------------------\n Past Quality (last 20):\n ------------------------\n {}\n \"\"\".format(\n next_session,\n last_session,\n self._stat.actual_repetitions,\n self._health(),\n self._past_quality_graph(),\n )", "def info(self):\n return (f\"Match id: {self._id}\\n\"\n f\"dire_score: {self.dire_score}\\n\"\n f\"dire_team: {self.dire_team}\\n\"\n f\"duration: {self.duration}\\n\"\n f\"game_mode: {self.game_mode}\\n\"\n f\"patch: {self.patch}\\n\"\n f\"radiant_score: {self.radiant_score}\\n\"\n f\"radiant_team: {self.radiant_team}\\n\"\n f\"radiant_win: {self.radiant_win}\\n\"\n f\"skill: {self.skill}\\n\"\n f\"start_time: {self.start_time}\\n\")", "def _to_str(self):\n\t\tprint(\"predictors: {}, types: {} \\n method: {}, preprocessing: {}\\\n\t\t\t \\n partition_rate: {}, metric: {}, file name: {}\".format(\n\t\t\t self.predictors, self.predictors_types, self.method_name,\n\t\t\t self.preprocessing_methods, self.data_split, self.metric,\n\t\t\t self.plotting_file_name))", "def __str__(self):\n return \"{}\".format(self._matches.keys())", "def __str__(self):\n prob = str(round(self.probability, 5))\n dprob = str(round(self.postdProbability, 5))\n output = \"dprob: \" + dprob + \" \\tprob: \" + prob + \"\\t: \"\n for key in self.attackDict.keys():\n output += key + \" \"\n return output", "def __str__(self):\n sorted_table = InferenceUtils.get_n_best(self._table, max(len(self._table), 1))\n\n result = []\n for key, value in sorted_table.items():\n result.append('P(%s):=%f\\n' % (str(key), value))\n\n return ''.join(result)[:-1] if len(result) > 0 else ''", "def getOpponentFormat(self):\r\n return self.opponent + \"\\t\"", "def str(self):\n out = \"{0}:\".format(self.gtype) if self.gtype else \"\"\n out += \"{0}\".format(repr(self.coords))\n out += \"[{0}]\".format(str(self.goalPtr)) if self.goalPtr else \"\"\n return out", "def to_string(self):\n\n return '[[%s], [%s]], [%d, %d], [%s], %s, %s, [%s]' % \\\n (', '.join(INT2STRING_CARD[h] for h in self.hand[0]),\n ', '.join(INT2STRING_CARD[h] for h in self.hand[1]),\n self.pot[0], self.pot[1],\n ', '.join(INT2STRING_CARD[p] for p in self.pub),\n INT2STRING_PHASE[self.phase],\n INT2STRING_PLAYER[self.player],\n ', '.join(INT2STRING_STATUS[s] for s in self.status))", "def __repr__(self):\n return \"{} hp:{:.1f} stunned: {}\".format(self.name, self.hitpoints,\n self.stunned)", "def __str__(self):\n return \"{} : {}\".format(self._conference, self.win_ratio_avg())", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n 
if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def get_info(self):\n out = ''\n for k in sorted(self.components.keys()):\n out += '{:s}: {:s}'.format(k, self.info[k]) + '\\n'\n return(out)", "def _player_info(self):\n return \"%r %s seat:%s m:%r c:%s b:%s \" % (self.name, self.serial, self.seat, self.money, self._chips, self._bet)", "def to_string(self):\n return \"User: {} Description: {} Ratings: {}\".format(self.id_user, self.description, self.ratings)", "def __str__(self):\n return ', '.join([self.yftf_data, self.info_hash, str(self.num_pieces), str(self.peers)])", "def __str__(self):\n return str((self.code, self.fitness,))", "def _get_problem_report_results_str(self):\n return 'curr_rew: %0.3f, best_rew: %0.3f'%(self.curr_reward, self.curr_best_reward)", "def __str__(self):\n out = self.san\n if self.comment != \"\":\n out += \" {\" + self.comment.replace('\\n', ' ') + \"}\"\n if len(self.nags) > 0:\n for n in self.nags:\n out += \" \" + n\n for v in self.variations:\n out += \" (\" + str(v).strip(' ') + \")\"\n return out", "def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value", "def __str__(self):\n return \"{} : {}\".format(self._team_name, self._win_ratio)", "def __str__(self):\n return f\"A {self.color} {self.__class__.__name__} worth {self.points} points \" \\\n f\"with one hot encoding {self.oneHotEncoding}\"", "def get_summary(self):\n \n text = \"word: {}, total_score: {} \\n\".format(self.clue, self.total_score)\n for card, score in self.sorted_card_score_pairs:\n card_text = \"\\t card.name:{} (team:{}), similarity: {} \\n\".format(card.name, card.color, score)\n text += card_text\n return text", "def stats_get_str(self):\n return self.stats.get_all_str()", "def stats_get_str(self):\n return self.stats.get_all_str()", "def __str__(self):\n \n s = \"(R: \" + str(self.r) + \", G: \" + str(self.g) + \", B: \" + str(self.b) + \")\"\n return s", "def __str__(self):\n return \"{\" + (\", \".join(\"%s: %s\"%(ngram, value) for (ngram, value) in self.items())) + \"}\"", "def __str__(self):\n result = ('---> Population - Generation: ' + str(self.generation)\n + '<--- \\n')\n result += 'Fittest Chromosome: \\n' + str(self.fittest_chromosome)\n\n for chromosome in self.chromosomes:\n result += str(chromosome) + '\\n'\n\n return result", "def __str__(self):\n r = []\n for item in sorted(self._data.keys()):\n correct, incorrect = self._data[item][True], self._data[item][False]\n acc = correct / (correct + incorrect)\n s = f\"{item:4} | Accuracy: {acc:.2f}% (diff {'+' if acc-item >=0 else ''}{acc-item:.2f}%) | correct: {correct:2}, incorrect: {incorrect:2}\" \n r.append(s)\n\n return \"\\n\".join(r)", "def __str__(self):\n return Hand.__str__(self) + '\\nHand Rank: ' + self.get_full_label()", "def __str__(self):\n return \"Name: \" + self._name + \"\\nScores: \" + \\\n \" \".join(map(str, self._scores))", "def __str__(self):\n result = \", \".join(map(str, self.cards))\n result += \"\\n \" + str(self.getPoints()) + \" points\"\n return result", "def __str__(self):\n output = 'Pathogens:\\n'\n for x in self.extant_p:\n output += ' n %s h %f d %f host %s extant\\n' % (x.name, x.height, x.dist, x.host.name)\n for x in self.not_extant_p:\n output += ' n %s h %f d %f host %s not extant\\n' % (x.name, x.height, x.dist, x.host.name)\n for x in self.not_yet_sampled_p:\n output += ' n %s h %f d %f host %s not yet sampled\\n' % (x.name, 
x.height, x.dist, x.host.name)\n\n output += 'Hosts:\\n'\n for x in self.extant_h:\n output += ' %s %f %f extant\\n' % (x.name, x.height, x.dist)\n for x in self.not_extant_h:\n output += ' %s %f %f not extant\\n' % (x.name, x.height, x.dist)\n for x in self.not_yet_sampled_h:\n output += ' %s %f %f not yet sampled\\n' % (x.name, x.height, x.dist)\n\n return output", "def to_string(self):\n return self.dungeon_string", "def to_string(self):\n if self.is_power_onoff():\n return 'Power On/Off'\n else:\n gain = str(hex(int(self['gain_speed'])))\n out = self['target'].ljust(20) + ' ' + self['filters'].ljust(11) + ' ' + self['x_bin'] + 'x' + self['y_bin'] + ' ' + gain[2:].upper()\n \n \n if self.number_windows() > 0:\n out += ' ' + self['x1_size'].rjust(4) + 'x' + self['y1_size'].ljust(4) + ' ' + self['x1_start'].ljust(3) + ' ' + self['y1_start'].ljust(4)\n if self.number_windows() > 1:\n out += ' ' + self['x2_size'].rjust(4) + 'x' + self['y2_size'].ljust(4) + ' ' + self['x2_start'].ljust(3) + ' ' + self['y2_start'].ljust(4)\n \n if 'Comment' in self:\n out += ' ' + self['Comment']\n return out", "def summary(self):\n name = 'name : ' + self.get_name()\n damage = 'damage : ' + str(self.get_damage())\n ammos = 'ammo : ' + str(self.get_ammos())\n owner = 'owner : ' + str(self.get_owner())\n return '\\n'.join([name, damage, ammos, owner])", "def __str__(self):\n summary = '{} object in the {} language, consisting of {} tokens.'\n return summary.format(\n type(self).__name__, self.language,\n len(self.hand_tagged)\n )", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def 
to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pprint.pformat(self.to_dict())" ]
[ "0.66056585", "0.6369337", "0.63330597", "0.63225114", "0.62778735", "0.62415534", "0.6170479", "0.6140569", "0.60968494", "0.60799277", "0.60438675", "0.60398", "0.60284495", "0.6009198", "0.59779775", "0.59726894", "0.597072", "0.59602815", "0.594238", "0.5917474", "0.5892665", "0.5871034", "0.5856106", "0.5852676", "0.58474237", "0.5828681", "0.5828217", "0.5819358", "0.58059597", "0.5805112", "0.5796561", "0.579408", "0.5786001", "0.57851934", "0.5763738", "0.57520485", "0.57520485", "0.5740636", "0.5740179", "0.5727682", "0.5727199", "0.5713208", "0.5701119", "0.5699988", "0.5696428", "0.56847936", "0.5673011", "0.56681913", "0.5662719", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391", "0.5656391" ]
0.80824745
0
Takes `x_axis` and returns a uniformly sampled array of values from its minimum to its maximum with a few extra points
def _extended_discrete_xaxis(x_axis, n_points=100, eps=0.10):
    min_value = np.min(x_axis)
    max_value = np.max(x_axis)
    distance = max_value - min_value
    return np.linspace(min_value - eps * distance, max_value + eps * distance, num=n_points)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(self):\n # For each row: round(random .* (max - min) + min, 0)\n random_array = prng.np_random.rand(self.num_discrete_space)\n return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]", "def scale(x_range=1, y_range=1):\r\n x = rand_val(x_range)\r\n y = rand_val(y_range)\r\n return np.array(((x, 0, 0),\r\n (0, y, 0),\r\n (0, 0, 1)), dtype=np.float)", "def simulate_x_values(self, minimum = -10, maximum = 10, length = 100):\n return np.sort(np.random.uniform(minimum, maximum, length) )", "def _default_sampling_xrange(self):\n from scipy.stats import rv_continuous\n dataset = self.rvdist.rvs(1000) if rv_continuous in self.rvdist.__class__.__mro__ \\\n else self.rvdist.dataset\n scale = np.nanmax(dataset) - np.nanmin(dataset)\n return [np.nanmin(dataset) - scale*0.05, np.nanmax(dataset) + scale*0.05]", "def uniform_sample(x):\n return np.random.choice(x)", "def _random_x(self):\n return np.random.uniform(-self._extent, self._extent, self._batchsize)", "def sample_from_zero_axis(x):\n idx = np.random.choice(x.shape[0], 1, replace=True)[0]\n return x[idx], idx", "def sampler(xaxis, yaxis, vals, x, y):\n i = 0\n while xaxis[i] < x:\n i += 1\n j = 0\n while yaxis[j] < y:\n j += 1\n return vals[i, j]", "def rand_custom(x: np.ndarray, y: np.ndarray, \n shape: tuple=(1,), interp_type: str='linear') -> np.ndarray :\n \n assert (y >= 0).any(), \"y shouldn't contain negative numbers\"\n \n size = 1\n for n in shape:\n size *= n\n \n y_norm = y/y.max()\n func = interp1d(x, y_norm, kind=interp_type)\n integr_ratio = (x.max() - x.min())/trapz(y_norm, x)\n \n full = np.array([])\n \n while full.shape[0] < size:\n size_all = int(np.round(integr_ratio*(size - full.shape[0])))\n \n a = np.random.uniform(size=size_all)\n b = np.random.uniform(x.min(), x.max(), size=size_all)\n \n full = np.hstack([full, b[np.where(a < func(b))[0]]])\n \n return full[:size].reshape(shape).copy()", "def initial_sampling(y):\n samples = list(np.random.randint(0, len(y), 2))\n while len(np.unique(y[samples] > 0.5)) != 2:\n samples = list(np.random.randint(0, len(y), 2))\n return samples", "def _ScatterXUniformlyExtendedRange(self, num_points, lattice_sizes,\n input_dims):\n x = []\n for _ in range(num_points):\n point = [\n np.random.random() * (lattice_sizes + 1.0) - 1.0\n for _ in range(input_dims)\n ]\n x.append(np.asarray(point))\n if input_dims == 1:\n x.sort()\n return x", "def bernoulliSample(x):\n \n g = tf.get_default_graph()\n \n with ops.name_scope(\"BernoulliSample\") as name:\n with g.gradient_override_map({\"Ceil\": \"Identity\", \"Sub\": \"BernoulliSample_ST\"}):\n return tf.ceil(x - tf.random_uniform(tf.shape(x)),name=name)", "def x_uniform1(xlim, n):\n return linspace(xlim[0], xlim[1], n)", "def rand(self, x):\r\n return np.random.random(1)[0]", "def scale_1d(x):\n return (min(x), max(x), len(x))", "def generate_random_data(min_, max_, len_):\n return np.random.uniform(min_, max_, len_)", "def sample(self):\n high = self.high.type(torch.float64) if self.dtype.is_floating_point else self.high.type(torch.int64) + 1\n sample = torch.empty(self.shape, dtype=torch.float64)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n\n # Vectorized sampling by interval type\n sample[unbounded] = 
torch.randn(unbounded[unbounded].shape, dtype=torch.float64)\n\n sample[low_bounded] = (-torch.rand(low_bounded[low_bounded].shape, dtype=torch.float64)).exponential_() + \\\n self.low[low_bounded]\n\n sample[upp_bounded] = self.high[upp_bounded] - (\n -torch.rand(upp_bounded[upp_bounded].shape, dtype=torch.float64)).exponential_()\n\n sample[bounded] = (self.low[bounded] - high[bounded]) * torch.rand(bounded[bounded].shape,\n dtype=torch.float64) + high[bounded]\n\n if not self.dtype.is_floating_point: # integer\n sample = torch.floor(sample)\n\n return sample.type(self.dtype)", "def _get_random_sample(self):\n p=np.zeros(len(self.dim_ranges))\n for i in range(len(self.dim_ranges)):\n temp=np.linspace(self.dim_ranges[i][0],self.dim_ranges[i][1],1000)\n p[i]=np.random.choice(temp,1,True,None)\n\n return p", "def _sample_schechter(x0, alpha, x_min, size=100, max_iter=1000):\n out = []\n n = 0\n num_iter = 0\n while (n<size) & (num_iter<max_iter):\n x = np.random.gamma(scale=x0, shape=alpha+2, size=size)\n x = x[x>x_min]\n u = np.random.uniform(size=x.size)\n x = x[u<x_min/x]\n out.append(x)\n n+=x.size\n num_iter += 1\n\n if num_iter >= max_iter:\n msg = (\"The maximum number of iterations reached.\",\n \"Random variates may not be representitive.\",\n \"Try increasing `max_iter`.\")\n print(msg)\n\n return np.concatenate(out)[:size]", "def minmax_scale(X, feature_range=..., *, axis=..., copy=...):\n ...", "def make_uniform_x(self, x_resolution, min_x = None, max_x = None, bin_above = 2.0, **kwargs):\n \n if min_x is None or max_x is None:\n a, b = self.get_min_max_x(**kwargs)\n if min_x is None:\n min_x = a\n if max_x is None:\n max_x = b\n \n new_x = numpy.arange(min_x, max_x + x_resolution / 2, x_resolution)\n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n resolution = (numpy.amax(m.x) - numpy.amin(m.x)) / len(m.x)", "def uniform(feature, bins):\n t = (feature.max()-feature.min())/bins\n return [t*i for i in range(1, bins)]", "def get_uniform_axis_sample(a, b, N, dtype):\n assert a < b, \"condition a < b violated!\"\n assert isinstance(N, int), \"condition N of type int violated!\"\n if dtype is int:\n return list(np.linspace(a, b, N).astype(int))\n elif dtype is float:\n return list(np.linspace(a, b, N))\n else:\n raise AssertionError(\"dtype {} not supported for uniform sampling!\".format(dtype))", "def _ScatterXUniformly(self, num_points, lattice_sizes, input_dims):\n x = []\n for _ in range(num_points):\n point = [\n np.random.random() * (lattice_sizes - 1.0) for _ in range(input_dims)\n ]\n x.append(np.asarray(point))\n if input_dims == 1:\n x.sort()\n return x", "def upsample(x):\n return F.interpolate(x, scale_factor=2, mode=\"nearest\")", "def myTakeStep2(x):\n s = 0.5\n x += np.random.uniform(-s, s, np.shape(x))\n return x", "def random_transform(self, x, seed=None):\n # x is a single audio, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n if not (self.zoom_range[0] == 1 and self.zoom_range[1] == 1):\n zx = np.random.uniform(self.zoom_range[0], self.zoom_range[1])\n input_length = x.shape[img_row_axis]\n x = resample(x, num=int(zx * x.shape[img_row_axis]), axis=img_row_axis)\n if x.shape[img_row_axis] >= input_length:\n x = x[:input_length]\n else:\n x = np.pad(x, ((0, input_length - x.shape[img_row_axis]), (0, 0)),\n 'constant', constant_values=(0, np.mean(x)))\n\n if shift:\n hx = 
np.random.uniform(-self.shift, self.shift)\n x = shift(x, (int(hx * x.shape[img_row_axis]), 0), mode=self.fill_mode, cval=self.cval)\n\n if self.roll_range:\n tx = np.random.uniform(-self.roll_range, self.roll_range)\n if self.roll_range < 1:\n tx *= x.shape[img_row_axis]\n x = np.roll(x, int(tx), axis=(img_row_axis))\n\n if self.horizontal_flip:\n if np.random.random() < 0.5:\n x = np.flip(x, axis=img_row_axis)\n\n if (self.noise):\n if np.random.random() < 0.5:\n if self.noise[-1] == 'Uniform':\n x = x + np.random.uniform(self.noise[0], self.noise[1], size=x.shape)\n elif self.noise[-1] == 'Normal':\n x = x + np.random.normal(self.noise[0], self.noise[1], size=x.shape)\n\n if self.brightness_range is not None:\n x = random_brightness(x, self.brightness_range)\n\n return x", "def random_shift(x, fraction):\n min_x, max_x = np.min(x), np.max(x)\n m = np.random.uniform(-fraction, fraction, size=x.shape) + 1\n return np.clip(x * m, min_x, max_x)", "def gen_data(min_coord, max_coord, size):\r\n data = np.random.randint(min_coord, max_coord, size)\r\n return data", "def randrange(n, vmin, vmax):\n return (vmax - vmin) * np.random.rand(n) + vmin", "def get_gaussian_axis_sample(a, b, N, dtype):\n assert a < b, \"condition a < b violated!\"\n assert isinstance(N, int), \"condition N of type int violated!\"\n\n data = []\n for n in range(N):\n x = a + get_norm_cdf(N)[n]*(b-a)\n if dtype is int:\n data.append(int(x))\n elif dtype is float:\n data.append(x)\n else:\n raise AssertionError(\"dtype {} not supported for uniform sampling!\".format(dtype))\n return data", "def sample_points(x, n_samples, axis=0, replace=True):\n n_original = x.shape[axis]\n indices = np.random.choice(n_original, n_samples, replace=replace)\n return x.take(indices, axis=axis)", "def scale(x):\n min_x, max_x = numpy.min(x), numpy.max(x)\n if min_x != max_x:\n x = (x-min_x)/(max_x-min_x)\n else:\n # all the numbers are the same in x\n x = numpy.asarray([1/len(x) for i in range(len(x)) ])\n return x.tolist()", "def sample(self):\n ndim = len(self.lower_bounds)\n pts = numpy.zeros(ndim)\n for j in range(ndim):\n lb = self.lower_bounds[j]\n ub = self.upper_bounds[j]\n pts[j] = numpy.random.uniform(lb, ub)\n return pts", "def _generate_signal(self):\n x = np.arange(self.n, dtype='float')\n resample = np.random.rand(self.n) >= self.proba\n resample[0] = True # randomly initialize first sample\n x[resample] = np.random.randn(np.sum(resample))\n for i in x[~resample]:\n x[int(i)] = x[int(i)-1]\n return x", "def log_uniform_sample(sample_range):\n log_min = np.log10(sample_range[0])\n log_max = np.log10(sample_range[1])\n u = np.random.rand()*(log_max-log_min) + log_min\n return np.power(10.0,u)", "def xavier_init(dims, uniform=True):\n n_inputs,n_outputs = dims\n if uniform:\n # 6 was used in the paper.\n init_range = np.sqrt(6.0 / (n_inputs + n_outputs))\n return tf.random_uniform(shape=dims,minval=-init_range, maxval=init_range)\n else:\n # 3 gives us approximately the same limits as above since this repicks\n # values greater than 2 standard deviations from the mean.\n stddev = np.sqrt(3.0 / (n_inputs + n_outputs))\n return tf.truncated_normal(shape=dims,stddev=stddev)", "def min_max_normalization(x, min_x = None, max_x = None):\n if min_x is None:\n min_x = np.min(x, axis=0)\n if max_x is None:\n max_x = np.max(x, axis=0)\n return (x - (min_x)) / (max_x - min_x), min_x, max_x", "def get_dummy_sample():\n return np.array([softmax(np.random.rand(5)), softmax(np.random.rand(5))])", "def normalize_axis(x, axis, dtype=float):\n x = 
x.astype(dtype)\n ind_list = [slice(None) for i in range(x.ndim)]\n try:\n for i in range(x.shape[axis]):\n ind_list[axis] = i\n ind = tuple(ind_list)\n minn = x[ind].min()\n maxx = x[ind].max()\n x[ind] = (x[ind]-minn) / (maxx-minn)\n except IndexError:\n raise np.AxisError(\n \"axis {} is out of bounds for array of dimension {}\".format(\n axis, x.ndim\n )\n )\n return x", "def sample_from_prior(self, n_samples):\n\n p0 = self.min + self.rng.rand(n_samples) * (self.max - self.min)\n return p0[:, np.newaxis]", "def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def random_sample(grid_size):\r\n g = grid_size\r\n x_range = g[1] - g[0]\r\n\r\n y_range = g[3] - g[2]\r\n\r\n x_off = g[0]\r\n y_off = g[2]\r\n (x,y) = (x_range*np.random.ranf()+x_off,y_range*np.random.ranf()+y_off) \r\n return (x,y)", "def getScaleValues(a, x):\n raise NotImplementedError('getScaleValues not implemented')", "def scale_data(x):\n mu = x.mean(axis=0)\n sigma = x.std(axis=0)\n x = (x - mu) / sigma\n return (x, mu, sigma)", "def _random2min_max(points):\n x_max = max([x for x, y in points])\n x_min = min([x for x, y in points])\n y_max = max([y for x, y in points])\n y_min = min([y for x, y in points])\n return np.array([x_min, y_min, x_max, y_max])", "def perturb_point(self, x, scale):\n x_samp = x + (scale / 2.0) * (np.random.rand(3) - 0.5)\n return x_samp", "def data_range(x):\n return max(x)-min(x)", "def sample_input_domain(num_samples):\n s1 = np.random.random(num_samples) * 10\n s2 = np.random.random(num_samples) * 2 - 5\n s3 = np.random.random(num_samples)\n s4 = np.random.random(num_samples) * 30 + 20\n return s1, s2, s3, s4", "def bernoulliSample(x):\r\n g = tf.get_default_graph()\r\n\r\n with ops.name_scope(\"BernoulliSample\") as name:\r\n with g.gradient_override_map({\"Ceil\": \"Identity\", \"Sub\": \"BernoulliSample_ST\"}):\r\n\r\n if args[\"deterministic_train\"]:\r\n train_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.ones(tf.shape(x)) * 0.5)\r\n else:\r\n train_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.random_uniform(tf.shape(x)))\r\n\r\n if args[\"deterministic_eval\"]:\r\n eval_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.ones(tf.shape(x)) * 0.5)\r\n else:\r\n eval_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.random_uniform(tf.shape(x)))\r\n\r\n mus = tf.cond(is_training, train_fn, eval_fn)\r\n\r\n return tf.ceil(x - mus, name=name)", "def generate_real_samples(dataset, n_samples):\r\n ix = np.random.randint(0, dataset.shape[0], n_samples)\r\n X = dataset[ix]\r\n y = np.ones((n_samples, 1))\r\n return X, y", "def bootstrap_resample(X, n=None):\r\n if n == None:\r\n n = len(X)\r\n \r\n resample_i = np.floor(np.random.rand(n)*len(X)).astype(int)\r\n X_resample = X[resample_i]\r\n return X_resample", "def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]", "def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)", "def initial_x():\n\n # RANDOMLY GENERATES the INITIAL VALUES of the independent variables:\n temp = [uniform(1, cfg.n) for i in range(cfg.n)]\n\n return np.array(temp, dtype=np.float_)", "def unif_sample(x_tensor, sample_size, dist):\n feature_len = x_tensor.size()[1]\n samples_tensor = torch.zeros((sample_size, feature_len),\n dtype=torch.float64, device=config.DEVICE).uniform_(-1.0 * dist, dist)\n samples_tensor += x_tensor\n return samples_tensor", "def _scale(x, axis=None):\n x = _remove_baseline(x, axis=axis)\n x /= np.std(x, ddof=1, 
axis=axis, keepdims=True)\n return x", "def get_random_position(limits):\n x = (limits[0][1]-limits[0][0])*np.random.random_sample() + limits[0][0]\n y = (limits[1][1]-limits[1][0])*np.random.random_sample() + limits[1][0]\n return np.array([x, y])", "def random_five(min_x, max_x):\n return random.sample(xrange(min_x, max_x), 5)", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def naive_act_norm_initialize(x, axis):\n x = np.asarray(x)\n axis = list(sorted(set([a + len(x.shape) if a < 0 else a for a in axis])))\n min_axis = np.min(axis)\n reduce_axis = tuple(a for a in range(len(x.shape)) if a not in axis)\n var_shape = [x.shape[a] for a in axis]\n var_shape_aligned = [x.shape[a] if a in axis else 1\n for a in range(min_axis, len(x.shape))]\n mean = np.reshape(np.mean(x, axis=reduce_axis), var_shape)\n bias = -mean\n scale = 1. / np.reshape(\n np.sqrt(np.mean((x - np.reshape(mean, var_shape_aligned)) ** 2,\n axis=reduce_axis)),\n var_shape\n )\n return scale, bias, var_shape_aligned", "def sampleUnif(x, n: int = 10000, seed: Optional[int] = None):\n rng = np.random.default_rng(seed)\n\n xMin, xMax = np.nanmin(x, axis=0), np.nanmax(x, axis=0)\n refSamples = rng.uniform(low=xMin.tolist(), high=xMax.tolist(), size=(n, xMin.shape[0]))\n\n assert refSamples.shape[1] == x.shape[1]\n return refSamples", "def noise_data(self, x):\n return x + np.random.normal(size=x.shape)", "def translate(x_range=0, y_range=0):\r\n x = rand_val(x_range)\r\n y = rand_val(y_range)\r\n return np.array(((1, 0, x),\r\n (0, 1, y),\r\n (0, 0, 1)), dtype=np.float)", "def bootstrap_resample(X, n=None):\r\n if n == None:\r\n n = len(X)\r\n\r\n resample_i = N.floor(N.random.rand(n)*len(X)).astype(int)\r\n X_resample = X[resample_i]\r\n return X_resample", "def slice_sample_bounded_max(N, burn, logdist, xx, widths, step_out, max_attempts, bounds):\n xx = copy.deepcopy(xx)\n D = len(xx)\n samples = []\n if (not isinstance(widths, list)) or len(widths) == 1:\n widths = np.ones(D) * widths\n\n log_Px = logdist(xx)\n\n for ii in range(N + burn):\n log_uprime = np.log(random.random()) + log_Px\n for dd in random.sample(range(D), D):\n x_l = copy.deepcopy(xx)\n x_r = copy.deepcopy(xx)\n xprime = copy.deepcopy(xx)\n\n # Create a horizontal interval (x_l, x_r) enclosing xx\n rr = random.random()\n x_l[dd] = max(xx[dd] - rr*widths[dd], bounds[dd][0])\n x_r[dd] = min(xx[dd] + (1-rr)*widths[dd], bounds[dd][1])\n\n if step_out:\n while logdist(x_l) > log_uprime and x_l[dd] > bounds[dd][0]:\n\n x_l[dd] = max(x_l[dd] - widths[dd], bounds[dd][0])\n while logdist(x_r) > log_uprime and x_r[dd] < bounds[dd][1]:\n x_r[dd] = min(x_r[dd] + widths[dd], bounds[dd][1])\n\n # Propose xprimes and shrink interval until good one found\n zz = 0\n num_attempts = 0\n while True:\n zz += 1\n # print(x_l)\n xprime[dd] = random.random()*(x_r[dd] - x_l[dd]) + x_l[dd]\n \n log_Px = logdist(xx)\n if log_Px > log_uprime:\n xx[dd] = xprime[dd]\n break\n else:\n # Shrink in\n num_attempts += 1\n if num_attempts >= max_attempts:\n # print('Failed to find something')\n break\n elif xprime[dd] > xx[dd]:\n x_r[dd] = xprime[dd]\n elif xprime[dd] < xx[dd]:\n x_l[dd] = xprime[dd]\n else:\n raise Exception('Slice sampling failed to find an acceptable point')\n # Record samples\n if ii >= burn:\n samples.append(copy.deepcopy(xx))\n return samples", "def generate_random_data(size, x_min=X_MIN, x_max=X_MAX, y_min=Y_MIN, y_max=Y_MAX):\n result = []\n for _i in 
range(size):\n result.append((randint(x_min, x_max), randint(y_min, y_max)))\n\n return result", "def random_zoom(x, zoom_range, row_axis=0, col_axis=1, channel_axis=2,\n fill_mode='nearest', cval=0., interpolation_order=1):\n if len(zoom_range) != 2:\n raise ValueError('`zoom_range` should be a tuple or list of two'\n ' floats. Received: %s' % (zoom_range,))\n\n if zoom_range[0] == 1 and zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)\n x = apply_affine_transform(x, zx=zx, zy=zy, channel_axis=channel_axis,\n fill_mode=fill_mode, cval=cval,\n order=interpolation_order)\n return x", "def scale(inp: np.ndarray, new_min: float = 0., new_max: float = 1.,\n axis: int = -1) -> np.ndarray:\n xmax = inp.max(axis=axis, keepdims=True)\n xmin = inp.min(axis=axis, keepdims=True)\n a = (inp-xmin) / (xmax - xmin)\n y = a * (new_max - new_min) + new_min\n return y", "def scale(x, feature_range=(-1,1)):\r\n x = x * 2 - 1\r\n return x", "def scale_data(x_data):\n\n # Scale based on maximum\n x_max = np.amax(x_data)\n scaled_data = x_data / x_max\n return scaled_data", "def create_random_sample(idx_bins,count_array):\n idxs=[]\n for i,x in enumerate(count_array):\n if x > 0:\n idxs.extend(np.random.choice(idx_bins[i],size=x,replace=False))\n return idxs", "def rangeSample(val, minLim, maxLim):\n\tif val < minLim or val > maxLim:\n\t\tval = randint(minLim, maxLim)\n\treturn val", "def get_ranx0(rng):\n xr = range(-100, 101)\n x1 = rng.choice(xr)\n x0 = (x1,)\n return x0", "def sample(self) -> np.ndarray:\n q_target = np.empty(self.d)\n for i in range(self.d):\n q_target[i] = self.lim_lo[i] + np.random.rand() * (self.lim_up[i] - self.lim_lo[i])\n return q_target", "def uniform_sample(n, m):\n interval = m / n\n indices = [0]\n index = 0.0\n while True:\n index += interval\n if index >= m - 1:\n indices.append(int(m - 1))\n break\n else:\n indices.append(int(index))\n\n return np.array(indices)", "def abnormal_detection(x_axis):\n bool_normal = (x_axis.mean() - 3 * x_axis.std() <= x_axis) & \\\n (x_axis <= x_axis.mean() + 3 * x_axis.std())\n result = x_axis[bool_normal]\n return result", "def _sample_loguniform_real_point(\n self, dimension: Real, below_points: numpy.ndarray, above_points: numpy.ndarray\n ) -> numpy.ndarray:\n return self._sample_real_point(\n dimension, below_points, above_points, is_log=True\n )", "def __call__(self, *args):\n r = np.random.rand(*args)\n if type(r) is float:\n samples = self.values[(r < self.p).nonzero()[0][0]]\n elif type(r) is np.ndarray:\n samples = np.array(\n [self.values[np.nonzero(x < self.p)[0][0]] \n for x in r.flat]).reshape(r.shape)\n return samples", "def sample_matrix(dim, bound):\n return np.random.uniform(low=-bound, high=bound, size=(dim, dim))", "def create_random_sample_alt(idx_bins,count_array):\n idxs=[]\n for i,x in enumerate(count_array):\n if x > 0:\n idxs.extend([ idx_bins[i][ind] for ind in unique_sample_of_int(len(idx_bins[i])-1,x) ] )\n return idxs", "def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0.):\n if len(zoom_range) != 2:\n raise ValueError('`zoom_range` should be a tuple or list of two floats. 
'\n 'Received arg: ', zoom_range)\n\n if zoom_range[0] == 1 and zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n\n h, w = x.shape[row_axis], x.shape[col_axis]\n transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n return x", "def sample_softmax(x : Union[List[float], np.ndarray], t : float = 1):\n x = np.array(x)\n x = x - np.max(x)\n x = np.exp(x / t)\n x = x / np.sum(x)\n return np.random.choice(range(len(x)), p = x)", "def get_logarithmic_axis_sample(a, b, N, dtype):\n assert a < b, \"condition a < b violated!\"\n assert a > 0, \"condition a > 0 violated!\"\n assert isinstance(N, int), \"condition N of type int violated!\"\n\n # convert input range into exponent range\n lexp = np.log(a)\n rexp = np.log(b)\n exp_range = np.linspace(lexp, rexp, N)\n\n data = []\n for n in range(exp_range.shape[0]):\n x = np.exp(exp_range[n])\n if dtype is int:\n data.append(int(x))\n elif dtype is float:\n data.append(x)\n else:\n raise AssertionError(\"dtype {} not supported for uniform sampling!\".format(dtype))\n return data", "def x0_rand(mu3,xb,num_min):\n Px_i = np.random.rand() * (xb[0,1] - xb[0,0] - 2*num_min) + xb[0,0] + \\\n num_min\n pk1_i, pk2_i = 1.0, 1.0\n while (pk1_i+pk2_i >= 1.0):\n pk1_i = np.random.rand() * (xb[1,1] - xb[1,0] - 2*num_min) + \\\n xb[1,0] + num_min\n pk2_i = np.random.rand() * (min(xb[2,1],1-pk1_i) - xb[2,0] - \\\n 2*num_min) + xb[2,0] + num_min\n mu1_i = np.random.rand() * (xb[3,1] - max(xb[3,0],2*mu3) - 2*num_min) + \\\n max(xb[3,0],2*mu3) + num_min\n mu2_i = np.random.rand() * (min(xb[4,1],mu1_i) - max(xb[4,0],mu3) - \\\n 2*num_min) + max(xb[4,0],mu3) + num_min\n return np.array([Px_i,pk1_i,pk2_i,mu1_i,mu2_i])", "def scale_range(data, minTo, maxTo):\n minFrom = np.min(data)\n maxFrom = np.max(data)\n \n scaled_data = []\n \n for point in data:\n new_point = minTo + (maxTo - minTo) * ((point - minFrom)/(maxFrom - minFrom))\n scaled_data.append(new_point)\n \n return scaled_data", "def initial_vector(self):\n\n return asarray([np.random.uniform(l, u) for l, u in self.bounds])", "def sample (self, n):\n y = self.bins\n x = np.r_[0, self.values.cumsum ()] / self.sum\n # interpolate inverse CDF\n out = np.interp (np.random.random (n), x, y)\n if n == 1:\n return out[0]\n else:\n return out.reshape ((n,))", "def subsample(y, lims):\n buckets = len(lims) - 1\n y_subs = np.zeros(buckets)\n for i in range(buckets):\n y_subs[i] = np.min(y[lims[i]:lims[i+1]])\n\n return y_subs", "def sample(x, p=None):\n s = np.random.random_sample()\n if p is None:\n return x[int(s*len(x))]\n else:\n p = np.cumsum(p)\n p = p / float(p[-1])\n return x[sum(s >= p)]", "def noise(x: np.ndarray) -> np.ndarray:\n\n return np.random.normal(loc=MEAN, scale=1e-2, size=1)", "def sample(self, n_samples=1):\n\n\t\tsamples = np.random.random((n_samples, self.__ndims))*self.__range + self.__low\n\t\treturn samples", "def var(x, axis=None):\r\n x = asarray(x)\r\n # figure out sample size along the axis\r\n if axis is None:\r\n n = x.size\r\n else:\r\n n = x.shape[axis]\r\n # compute the sum of squares from the mean(s)\r\n sample_SS = sum(x ** 2, axis) - sum(x, axis) ** 2 / n\r\n return sample_SS / (n - 1)", "def random(self=None, sample=100, min=0, max=100):\r\n\t\treturn DataStatistics([randint(min, max) for i in range(sample)])", "def generate_random_scatter(x_range, w, b, k):\n\tx_1 = []\n\ty_1 
= []\n\tx_2 = []\n\ty_2 = []\n\tfor i in range(k):\n\t\txx = random.random() * (x_range[1] - x_range[0]) + x_range[0]\n\t\tx_1.append(xx)\n\t\tamplitude = random.randint(4, 15)\n\t\tyy = w * xx + b + amplitude\n\t\ty_1.append(yy)\n\n\t\txx = random.random() * (x_range[1] - x_range[0]) + x_range[0]\n\t\tx_2.append(xx)\n\t\tamplitude = random.randint(4, 15)\n\t\tyy = w * xx + b - amplitude\n\t\ty_2.append(yy)\n\treturn x_1, y_1, x_2, y_2", "def custom_argmin(arr):\n return np.random.choice(np.flatnonzero(arr == arr.min()))", "def lhsample(N, bounds):\n \n D = len(bounds)\n sample = vstack(arange(a,b,(b-a)/N) for (a,b) in bounds).T + rand(N,D) / N \n for d in xrange(D): \n shuffle(sample[:,d])\n return sample", "def get_xrange(self) -> np.array:\n # todo: ensure this functions work as well for y_values\n lower, upper = self.get_xrange_indices()\n return self.x[lower, upper + 1]", "def random_brightness(x, brightness_range):\n if len(brightness_range) != 2:\n raise ValueError('`brightness_range should be tuple or list of two floats. '\n 'Received arg: ', brightness_range)\n\n u = np.random.uniform(brightness_range[0], brightness_range[1])\n x = u * x\n\n return x", "def thinning_sampler(rng, lamb, xmin=0, lamb_min=1e-10):\n while lamb(xmin) > lamb_min:\n dx = -np.log(rng.rand()) / lamb(xmin)\n x = xmin + dx\n accept_rate = lamb(x) / lamb(xmin)\n\n if rng.rand() < accept_rate:\n return x\n xmin = x\n raise ValueError(\n f\"require lamb({xmin})>{lamb_min} to guarantee cdf(infty)=1\"\n )" ]
[ "0.6698139", "0.6558226", "0.65457267", "0.64248043", "0.6340408", "0.6254296", "0.61594844", "0.6148418", "0.60947704", "0.60901093", "0.5998522", "0.5891753", "0.5868367", "0.58548534", "0.5790514", "0.5744083", "0.5735349", "0.57298034", "0.57228523", "0.5720773", "0.5694697", "0.56932783", "0.56902844", "0.5680441", "0.5627093", "0.56042904", "0.5602855", "0.55887944", "0.5578321", "0.5570794", "0.5563616", "0.55588645", "0.5547898", "0.55314845", "0.55296904", "0.55205816", "0.5517501", "0.55137676", "0.5511837", "0.5509596", "0.5507557", "0.5505392", "0.5498944", "0.54835546", "0.5483243", "0.547789", "0.5477683", "0.54715914", "0.54690224", "0.54523987", "0.5438893", "0.5436108", "0.542254", "0.5415916", "0.54146683", "0.5405445", "0.53802294", "0.5370432", "0.536054", "0.5352187", "0.5350395", "0.53443956", "0.5342975", "0.53250027", "0.5319233", "0.5302199", "0.530089", "0.53007656", "0.5294612", "0.5290604", "0.5287535", "0.52799785", "0.5270537", "0.5263525", "0.5260026", "0.52589905", "0.5254856", "0.5254445", "0.5254314", "0.52484614", "0.52314925", "0.52312064", "0.5229239", "0.5227659", "0.5226755", "0.521929", "0.5213589", "0.52044785", "0.51941913", "0.51929206", "0.5191202", "0.5188624", "0.51875436", "0.51838356", "0.5181683", "0.51756066", "0.51645476", "0.5164329", "0.51613444", "0.5159732" ]
0.59077674
11
Quick plot of a `tick.base.TimeFunction`
def plot_timefunction(time_function, labels=None, n_points=300, show=True, ax=None):
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(4, 4))
    else:
        show = False

    if time_function.is_constant:
        if labels is None:
            labels = ['value = %.3g' % time_function.border_value]
        t_values = np.arange(10).astype('float')
        ax.plot(t_values, time_function.value(t_values), label=labels[0])

    else:
        if labels is None:
            interpolation_to_legend = {
                TimeFunction.InterLinear: 'Linear',
                TimeFunction.InterConstLeft: 'Constant on left',
                TimeFunction.InterConstRight: 'Constant on right'
            }
            border_to_legend = {
                TimeFunction.Border0: 'border zero',
                TimeFunction.BorderConstant:
                    'border constant at %.3g' % time_function.border_value,
                TimeFunction.BorderContinue: 'border continue',
                TimeFunction.Cyclic: 'cyclic'
            }

            labels = [
                'original points',
                '%s and %s' % (interpolation_to_legend[time_function.inter_mode],
                               border_to_legend[time_function.border_type])
            ]

        original_t = time_function.original_t
        if time_function.border_type == TimeFunction.Cyclic:
            cycle_length = original_t[-1]
            original_t = np.hstack((original_t, original_t + cycle_length,
                                    original_t + 2 * cycle_length))

        t_values = _extended_discrete_xaxis(original_t, n_points=n_points)
        ax.plot(time_function.original_t, time_function.original_y, ls='',
                marker='o', label=labels[0])
        ax.plot(t_values, time_function.value(t_values), label=labels[1])

    ax.legend()

    if show is True:
        plt.show()

    return ax.figure
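A minimal usage sketch for the plotting helper above, assuming tick's documented API (a TimeFunction built from a (t, y) tuple in tick.base, with plot_timefunction exposed via tick.plot); the sample values are illustrative only:

# Sketch under the assumptions stated above; not part of the dataset record.
import numpy as np
from tick.base import TimeFunction
from tick.plot import plot_timefunction

# A few support points for the function to interpolate.
t_values = np.array([0., 1., 2., 3.])
y_values = np.array([0., 2., 1., 3.])

# Linear interpolation between the points, zero outside the support
# (inter_mode / border_type names follow the constants used in the code above).
tf = TimeFunction((t_values, y_values),
                  inter_mode=TimeFunction.InterLinear,
                  border_type=TimeFunction.Border0)

plot_timefunction(tf)  # original points plus the interpolated curve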
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()", "def plot_time_series(self, *args, **kwargs):\n return SimulationStaticVisualizer(self, *args, **kwargs)", "def plot_times(self, train_time, title=None, xmin=None, xmax=None,\n ymin=None, ymax=None, ax=None, show=True, color=None,\n xlabel=True, ylabel=True, legend=True, chance=True,\n label='Classif. score'):\n if not np.array(train_time).dtype is np.dtype('float'):\n raise ValueError('train_time must be float | list or array of '\n 'floats. Got %s.' % type(train_time))\n\n return plot_gat_times(self, train_time=train_time, title=title,\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax, ax=ax, show=show,\n color=color, xlabel=xlabel, ylabel=ylabel,\n legend=legend, chance=chance, label=label)", "def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)", "def time_function(t):\n\n omega = np.pi\n return np.sin(omega * t) + np.sin(10 * omega * t) + np.sin(20 * omega * t)", "def plot():\n pass", "def visualize(x, y, xlabel=None, ylabel=None, title=None, ylim=None):\n total_seconds = (x[-1] - x[0]).total_seconds()\n if total_seconds <= 86400 * 1 * 3:\n return plot_one_day(x, y, xlabel, ylabel, title, ylim)\n \n elif total_seconds <= 86400 * 7 * 2:\n return plot_one_week(x, y, xlabel, ylabel, title, ylim)\n \n elif total_seconds <= 86400 * 30 * 1.5:\n return plot_one_month(x, y, xlabel, ylabel, title, ylim)\n \n elif total_seconds <= 86400 * 90 * 1.5:\n return plot_one_quarter(x, y, xlabel, ylabel, title, ylim)\n \n elif total_seconds <= 86400 * 365 * 1.5:\n return plot_one_year(x, y, xlabel, ylabel, title, ylim)", "def matshow_tseries(time_series, fig=None, axis=0, xtick_n=5, time_unit=None,\r\n xlabel=None, ylabel=None):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n if not fig.get_axes():\r\n ax = fig.add_subplot(1, 1, 1)\r\n else:\r\n ax = fig.get_axes()[axis]\r\n\r\n #Make sure that time displays on the x axis with the units you want:\r\n #If you want to change the time-unit on the visualization from that used to\r\n #represent the time-series:\r\n if time_unit is not None:\r\n tu = time_unit\r\n conv_fac = ts.time_unit_conversion[time_unit]\r\n #Otherwise, get the information from your input:\r\n else:\r\n tu = time_series.time_unit\r\n conv_fac = time_series.time._conversion_factor\r\n\r\n this_time = time_series.time / float(conv_fac)\r\n ax.matshow(time_series.data)\r\n\r\n ax.set_xticks(list(range(len(this_time)))[::len(this_time) / xtick_n])\r\n ax.set_xticklabels(this_time[::len(this_time) / xtick_n])\r\n\r\n if xlabel is None:\r\n ax.set_xlabel('Time (%s)' % tu)\r\n else:\r\n ax.set_xlabel(xlabel)\r\n\r\n if ylabel is not None:\r\n ax.set_ylabel(ylabel)\r\n\r\n return fig", "def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n 
axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")", "def plottf(tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,scale='log',\r\n normalize='n',):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=10*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=10*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(tfarray/np.max(abs(tfarray)))\r\n else:\r\n plottfarray=abs(tfarray)\r\n \r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.08\r\n plt.rcParams['figure.subplot.right']=.99\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n \r\n \r\n plt.figure(fignum)\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,\r\n tlst[-1]/tinc+starttime,flst[0],flst[-1]),aspect=aspect,\r\n vmin=vmin,vmax=vmax,cmap=cmap,interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,\r\n tlst[-1]/tinc+starttime,flst[0],flst[-1]),aspect=aspect,\r\n cmap=cmap,interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n plt.show()", "def running_time(func, counter, plot_type, *args, **kwargs):\n plots = []\n counter = 0\n #include counter\n output = (func(counter, *args, **kwargs)) #returns [value, counter]\n value, counter = output\n if plot_type.upper() == \"STANDARD\":\n plots.append(output)\n else:\n plots.append((log(float(value)), log(float(counter))))\n #print plots\n return plt.plot(plots)", "def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', 
dpi=150)", "def plotAll(fx,tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,normalize='n',\r\n scale='log'):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n \r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=20*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=20*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(plottfarray/np.max(abs(plottfarray)))**2\r\n else:\r\n plottfarray=abs(tfarray)**2\r\n \r\n t=np.arange(len(fx))*dt+starttime*dt\r\n FX=np.fft.fft(padzeros(fx))\r\n FXfreq=np.fft.fftfreq(len(FX),dt)\r\n \r\n #set some plot parameters\r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.13\r\n plt.rcParams['figure.subplot.right']=.98\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n #plt.rcParams['font.family']='helvetica'\r\n \r\n fig=plt.figure(fignum)\r\n \r\n #plot FFT of fx\r\n fax=fig.add_axes([.05,.25,.1,.7])\r\n plt.plot(abs(FX[0:len(FX)/2]/max(abs(FX)))**2,FXfreq[0:len(FX)/2],'-k')\r\n plt.xlim(0,1)\r\n plt.ylim(0,FXfreq[len(FX)/2-1])\r\n fax.xaxis.set_major_locator(MultipleLocator(.5))\r\n \r\n #plot TFD\r\n pax=fig.add_axes([.25,.25,.75,.7])\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,vmin=vmin,vmax=vmax,cmap=cmap,\r\n interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,cmap=cmap,\r\n interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n \r\n #plot timeseries\r\n tax=fig.add_axes([.25,.05,.60,.1])\r\n plt.plot(t,fx,'-k')\r\n plt.axis('tight')\r\n plt.show()", "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)", "def plot_dt_signal(x, title=None):\n pylab.figure()\n pylab.stem(range(len(x)), x)\n pylab.title(title)\n pylab.xlabel(\"samples\")", "def plot_time_slices(self):\n U = self.r.u[:, 15:-15, :]\n T = range(U.shape[2])\n 
kwarglist = [dict(t=t,\n index=self.index,\n U=U,\n levels=self.levels,\n fname=self.time_slice_path(t))\n for t in T]\n util.parallel_process(plot_time_slice, kwarglist=kwarglist)", "def plot_time(signal,\n fs=None,\n ax=None,\n scale='linear',\n sides='onesided',\n title=None,\n label=None,\n **kwargs):\n if ax is None:\n ax = plt.gca()\n if fs is None:\n fs = 1\n ax.set_xlabel(\"Samples\")\n else:\n ax.set_xlabel(\"t / s\")\n t = _time_vector_onesided(signal, fs)\n if scale == 'linear':\n ax.set_ylabel('Amplitude (linear)')\n elif scale == 'db':\n signal = _db_calculation(signal)\n ax.set_ylabel('Amplitude / dB')\n else:\n raise NameError(\"Invalid scale\")\n if sides == 'onesided':\n ax.plot(t, signal, label=label, linewidth=2.0)\n elif sides == 'twosided':\n ax.plot(\n _time_vector_twosided(signal,\n fs),\n np.fft.fftshift(signal),\n label=label, linewidth=1.0)\n else:\n raise NameError(\"Invalid sides\")\n if title is not None:\n ax.set_title(title)\n ax.grid(True)\n ax.ticklabel_format(useOffset=False)\n return ax", "def plot_x(t, x):\n plt.figure()\n plt.plot(t, x)\n plt.title(\"Vertical position of the skydiver as a function of time\")\n plt.xlabel(\"Time t [s]\")\n plt.ylabel(\"Height [m]\")\n plt.savefig('Parachute_position.png')", "def plot_tseries(time_series, fig=None, axis=0,\r\n xticks=None, xunits=None, yticks=None, yunits=None,\r\n xlabel=None, ylabel=None, yerror=None, error_alpha=0.1,\r\n time_unit=None, **kwargs):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n if not fig.get_axes():\r\n ax = fig.add_subplot(1, 1, 1)\r\n else:\r\n ax = fig.get_axes()[axis]\r\n\r\n #Make sure that time displays on the x axis with the units you want:\r\n #If you want to change the time-unit on the visualization from that used to\r\n #represent the time-series:\r\n if time_unit is not None:\r\n tu = time_unit\r\n conv_fac = ts.time_unit_conversion[time_unit]\r\n #Otherwise, get the information from your input:\r\n else:\r\n tu = time_series.time_unit\r\n conv_fac = time_series.time._conversion_factor\r\n\r\n this_time = time_series.time / float(conv_fac)\r\n ax.plot(this_time, time_series.data.T, **kwargs)\r\n\r\n if xlabel is None:\r\n ax.set_xlabel('Time (%s)' % tu)\r\n else:\r\n ax.set_xlabel(xlabel)\r\n\r\n if ylabel is not None:\r\n ax.set_ylabel(ylabel)\r\n\r\n if yerror is not None:\r\n if len(yerror.data.shape) == 1:\r\n this_e = yerror.data[np.newaxis, :]\r\n else:\r\n this_e = yerror.data\r\n delta = this_e\r\n e_u = time_series.data + delta\r\n e_d = time_series.data - delta\r\n for i in range(e_u.shape[0]):\r\n ax.fill_between(this_time, e_d[i], e_u[i], alpha=error_alpha)\r\n\r\n return fig", "def coordinate_vs_time_plotter(array, xyz_axis=0, bird=0, axis_of_time_steps=2, start=0., end=1.):\r\n y_values = array[bird, xyz_axis, :]\r\n x_values = get_time_array(array, axis_of_time_steps, start, end)\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot()\r\n\r\n if xyz_axis == 0:\r\n ax.set_ylabel('X (m)')\r\n elif xyz_axis == 1:\r\n ax.set_ylabel('Y (m)')\r\n elif xyz_axis == 2:\r\n ax.set_ylabel('Z (m)')\r\n else:\r\n print(\"That is not a valid axis choice. 
Please choose one of: 0, 1, 2\")\r\n ax.set_xlabel('Time (s)')\r\n ax.scatter(x_values, y_values)\r\n return fig.show()", "def plot_global(type):\n click.echo(click.style(\n \"Generating Plot....\", fg='cyan', bold='true'))\n plot_time_series.TimeSeriesPloTs.plot_global(type)\n click.echo(click.style(\n \"Done....\", fg='green', bold='true'))", "def demo(self, tmin=0, tmax=27.4, cadence=30.0 / 60.0 / 24.0, offset=0, raw=False, ax=None):\n t = np.arange(tmin, tmax, cadence)\n if ax is None:\n plt.figure('demo', figsize=(8, 3))\n else:\n plt.sca(ax)\n y = self.model(t)\n if raw:\n plt.plot(t, y + offset, alpha=0.25, linewidth=1, color='royalblue')\n plt.plot(t, self.integrated(t) + offset, alpha=0.5, linewidth=1, color='darkorange')\n plt.xlim(tmin, tmax)\n # plt.ylim(np.max(y)+0.01, np.min(y)-0.01)\n plt.xlabel('Time (days)')\n plt.ylabel('Flux (mag.)')", "def plot_timefrequency(z, time, f, signal=None, method=\"stft\"):\n\n if method == \"stft\":\n figure_title = \"Short-time Fourier Transform Magnitude\"\n fig, ax = plt.subplots()\n for i in range(len(time)):\n ax.plot(f, z[:, i], label=\"Segment\" + str(np.arange(len(time))[i] + 1))\n ax.legend()\n ax.set_title(\"Signal Spectrogram\")\n ax.set_ylabel(\"STFT Magnitude\")\n ax.set_xlabel(\"Frequency (Hz)\")\n\n elif method == \"cwt\":\n figure_title = \"Continuous Wavelet Transform Magnitude\"\n elif method == \"wvd\":\n figure_title = \"Wigner Ville Distrubution Spectrogram\"\n fig = plt.figure()\n plt.plot(time, signal)\n plt.xlabel(\"Time (sec)\")\n plt.ylabel(\"Signal\")\n\n elif method == \"pwvd\":\n figure_title = \"Pseudo Wigner Ville Distribution Spectrogram\"\n\n fig, ax = plt.subplots()\n spec = ax.pcolormesh(time, f, z, cmap=plt.get_cmap(\"magma\"), shading=\"auto\")\n plt.colorbar(spec)\n ax.set_title(figure_title)\n ax.set_ylabel(\"Frequency (Hz)\")\n ax.set_xlabel(\"Time (sec)\")\n return fig", "def __plot_T__(self, refresh=False, *args):\n # If plot is not requested, return:\n if not self.plotTeVar.get() or not self.plotTiVar.get():\n return\n\n # Check for a closed window:\n if 'T' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['T'].number):\n del self.plots['T']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'T' in self.plots.keys()\n if refresh:\n if 'T' in self.plots.keys():\n fig = self.plots['T']\n fig = matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a Tew window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('T, time = ' + '{:.3f}'.format(1e9*self.imp.t(self.it)))\n ax = fig.add_subplot(111)\n\n # Plot:\n if self.plotTeVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.Te((self.it), self.ir)[0], 'r-', label='e')\n if self.plotTiVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.Ti((self.it), self.ir)[0], 'b-', label='i')\n\n ax.set_xlabel('r (um)', fontsize=12)\n ax.set_ylabel('T (keV)', fontsize=12)\n ax.legend()\n\n if self.logxVar.get():\n ax.set_xscale('log')\n if self.logyVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n fig.show()\n fig.canvas.draw()\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n self.plots['T'] = fig", "def plot_basic(time, data, lgnd=None):\n pylab.figure()\n pylab.plot(time, data)\n pylab.xlabel('time, s')\n pylab.ylabel('data')\n pylab.title('Basic Plotter')\n if lgnd != None:\n pylab.legend(lgnd)\n pylab.grid(True)\n pylab.show()", 
"def comp_time_plot(p1=database['K+'], p2=database['pi+'], pmax=80, plot=True):\r\n dt = []\r\n p_range = np.linspace(10, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n for p in p_range:\r\n t1_per_m = 76.273/(beta(p, m1)*gamma(p, m1)*c)\r\n t2_per_m = 76.273/(beta(p, m2)*gamma(p, m2)*c)\r\n dt.append(abs(t1_per_m - t2_per_m)*1e12)\r\n dt_12_5 = dt[np.argmin(abs(p_range-12.5))]\r\n dt_75 = dt[np.argmin(abs(p_range-75))]\r\n ratio = dt_12_5/dt_75\r\n if plot==True:\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(p_range, dt, 'b', label=r'$\\Delta t$')\r\n ax.axvline(12.5, color='r', label='p=12.5 GeV')\r\n ax.axvline(75, color='g', label='p=75 GeV')\r\n ax.set_xlim(10, pmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n# ax.set_yscale('log')\r\n ax.set_ylabel(r'$\\Delta t$ / ps', fontsize=20)\r\n title = f'{p1.name} to {p2.name} '\r\n title += r'$\\Delta t$ dependancy on particle momenta'\r\n ax.set_title(title, fontsize=20)\r\n ax.legend(fontsize=20)\r\n text = 'dt(12.5) = {0:.2f} ps, '.format(dt_12_5)\r\n text += 'dt(75) = {0:.2f} ps, '.format(dt_75)\r\n text += 'ratio = {0:.3f}'.format(ratio)\r\n plt.show()\r\n print(text)\r\n return [dt_12_5, dt_75, ratio]", "def plot_time(time_to_complete, plot_num):\n average = []\n for i, point in enumerate(time_to_complete):\n average.append(sum(time_to_complete[:i+1])/ (i+1))\n plt.plot(time_to_complete, color= 'blue', label=\"Epoch Time\")\n plt.plot(average, color = 'red', label= \"Average Time\", zorder = 3)\n plt.legend()\n plt.title(\"Time to complete FetchReach\")\n plt.ylabel(\"Time (seconds)\")\n plt.xlabel(\"Number iterations\")\n plt.savefig(\"./plots/time/time_to_complete_{}.png\".format(plot_num))\n plt.clf()", "def tick(self):", "def test_plot_timeseries_univariate(tmpdir, random):\n x = np.linspace(0, 10, 20)\n y = np.sin(x)\n segments = get_test_segments(data=np.expand_dims(y, 0))\n\n output_path = Path(tmpdir) / 'temp_visualization_test_univariate.png'\n\n plot_timeseries(x=x,\n y=y,\n segments=segments,\n show_plot=False,\n output_filename=output_path)\n\n assert output_path.exists()", "def callback_time_cut(val):\n global plot_mode\n global idx_time\n last_plot_mode = plot_mode\n plot_mode = 'time_cut'\n idx_time = int(val)\n update_num_shadow(int(sld['neighbors'].val))\n # plot 121\n lcuttime.set_xdata( [val, val] )\n lcuttime.set_alpha( alpha_hm )\n lcutfreq.set_alpha( 0.0 )\n # plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_time ) # [True/False, True/False]\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True ] )\n replot_light()\n reform_axis()\n\n fig.canvas.draw_idle()", "def test_plot_ts(kwargs):\n nchains = 4\n ndraws = 500\n obs_data = {\n \"y\": 2 * np.arange(1, 9) + 3,\n \"z\": 2 * np.arange(8, 12) + 3,\n }\n\n posterior_predictive = {\n \"y\": np.random.normal(\n (obs_data[\"y\"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data[\"y\"]))\n ),\n \"z\": np.random.normal(\n (obs_data[\"z\"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data[\"z\"]))\n ),\n }\n\n const_data = {\"x\": np.arange(1, 9), \"x_pred\": np.arange(8, 12)}\n\n idata = from_dict(\n observed_data=obs_data,\n posterior_predictive=posterior_predictive,\n constant_data=const_data,\n coords={\"obs_dim\": np.arange(1, 9), \"pred_dim\": np.arange(8, 12)},\n dims={\"y\": [\"obs_dim\"], \"z\": [\"pred_dim\"]},\n )\n\n ax = plot_ts(idata=idata, y=\"y\", show=True, **kwargs)\n assert np.all(ax)", "def 
draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')", "def ProfilePlot(t,y,z,scale=86400, axis=0,color=[0.5,0.5,0.5]):\r\n from matplotlib import collections\r\n from matplotlib.ticker import Formatter\r\n\r\n class MyFormatter(Formatter):\r\n def __init__(self, dates, fmt='%b %d %Y'):\r\n self.fmt = fmt\r\n self.dates = dates\r\n\r\n def __call__(self, x, pos=0):\r\n 'Return the label for time x s'\r\n return datetime.strftime(datetime(1990,1,1)+timedelta(seconds=x),self.fmt)\r\n\r\n tsec = othertime.SecondsSince(t)\r\n formatter = MyFormatter(tsec)\r\n \r\n y = np.swapaxes(y,0,axis)\r\n \r\n lines=[]\r\n line2 =[]\r\n for ii, tt in enumerate(tsec):\r\n #xplot = set_scale(y[:,ii],tt)\r\n xplot = tt + y[:,ii]*scale\r\n lines.append(np.array((xplot,z)).T)\r\n line2.append(np.array([[tt,tt],[z[0],z[-1]]]).T)\r\n \r\n \r\n LC1 = collections.LineCollection(lines,colors=color,linewidths=1.5)\r\n LC2 = collections.LineCollection(line2,colors='k',linestyles='dashed') # Zero axis\r\n \r\n ax=plt.gca()\r\n ax.add_collection(LC1)\r\n ax.add_collection(LC2)\r\n ax.set_ylim((z.min(),z.max()))\r\n ax.xaxis.set_major_formatter(formatter)\r\n ax.set_xlim((tsec[0],tsec[-1]))\r\n plt.xticks(rotation=17) \r\n \r\n return ax", "def PlotTimes(metadata, data):\n\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp.clear()\n gp.xlabel('seconds')\n gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n styles = {}\n line_style = 1\n\n for dataset in data:\n x = numpy.array(dataset.time, dtype='float_')\n if not dataset.name in styles:\n styles[dataset.name] = line_style\n line_style += 1\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='impulses ls %d' % styles[dataset.name])\n else: # no need to repeat a title that exists already.\n d = Gnuplot.Data(x, dataset.data,\n with_='impulses ls %d' % styles[dataset.name])\n\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')", "def plot_series(self, t1=0, t2=100, t1p=None, t2p=None):\n \n plot_discretized(self.ts, self.ts_dis, t1=t1, t2=t2, t1p=t1p, t2p=t2p)", "def make_time_axis(axis=None,time_unit='s'):\n if axis is None:\n axis = plt.gca().xaxis\n class TimeFormatter(matplotlib.ticker.FuncFormatter):\n def __init__(self,time_unit):\n self.time_unit = time_unit\n conv = nitime.time_unit_conversion\n self.func = lambda x,y: (1.0*x) / conv[self.time_unit]\n def format_data_short(self,value):\n 'return a short string version'\n return \"%-12g\"%self.format_data(value)\n f = TimeFormatter(time_unit)\n axis.set_major_formatter(f)", "def plot_tcv(self):\n self.plot_profiles(0, title='Shot #{:d} @ t={:.2f} s'.format(self.shot, self.t))", "def test_plot_timeseries_multivariate(tmpdir, random):\n x = np.linspace(start=0, stop=10, num=20)\n ys = np.stack((np.sin(x), np.cos(x), np.tan(0.4 * x)))\n segments = get_test_segments(data=ys)\n output_path = Path(tmpdir) / 'temp_visualization_test_multivariate.png'\n\n plot_timeseries(x=x,\n y=ys.T,\n segments=segments,\n show_plot=False,\n output_filename=output_path)\n\n assert output_path.exists()", "def plot_time_series(df, plt):\n # Remove the plot frame lines\n delta = (df[\"timestamp\"].max() - df[\"timestamp\"].min()) / 10\n plt.xticks(\n np.arange(\n df[\"timestamp\"].min(),\n df[\"timestamp\"].max(),\n step=np.around(delta, decimals=1),\n )\n )\n plt.grid()", "def liveplot(x, y, xlim, ylim, title):\n plt.plot(x,y,'b.')\n plt.xlim(xlim)\n 
plt.ylim(ylim)\n plt.xlabel('North-South Axis')\n plt.ylabel('East-West Axis')\n plt.title(title)\n plt.show()", "def update_plot(axes):\n axes.clear()\n\n i = C.i\n C.i += di # globale Zählvariable erhöhen\n if C.i >= len(tt):\n time.sleep(2)\n C.i = 0\n\n t = tt[i]\n q1 = qq1[i]\n q2 = qq2[i]\n q3 = qq3[i]\n CCframe(q1, q2, q3)\n\n # Ausgabe der aktuellen Zeit\n pl.text(0.06, 0.05, \"t = %3.2fs\" % t, transform = axes.transAxes)\n pl.axis([-3, 3, -3, 3])\n axes.figure.canvas.draw()", "def plotTimeDelta(data, type_plot, device):\n mean = data.mean()\n std = data.std()\n max_data = data.max()\n min_data = data.min()\n max_indx = np.argmax(data) # max value index\n min_indx = np.argmin(data) # min value index\n x = np.arange(min_data, max_data, 0.1)\n y = normfun(x, mean, std)\n res_quantile = quantileValues(data, device)\n if type_plot == 0:\n plt.plot(x, y, color='blue')\n annot_max_min(x, y)\n # plt.hist(data.dropna(), bins=500, rwidth=0.9, normed=True)\n plt.title('Time Delta distribution')\n plt.xlabel('Time Delta')\n plt.ylabel('Probability')\n sns.distplot(tmp.deltaSeconds.dropna(),\n kde=True, rug=True, rug_kws={\"color\": \"k\"},\n kde_kws={\"color\": \"red\", \"lw\": 3, \"label\": \"KDE\"},\n hist_kws={\"histtype\": \"step\", \"lw\": 3, \"alpha\": 1,\n \"color\": \"g\"},\n bins=500)\n # ax.set(xlabel='Vibration Intensity', ylabel='Probability')\n elif type_plot == 1: # plot the max and min point\n plt.plot(data)\n plt.plot(max_indx, data[max_indx], 'ks')\n show_max = '['+str(max_indx)+' '+str(data[max_indx])+']'\n plt.annotate(show_max,\n xytext=(max_indx, data[max_indx]),\n xy=(max_indx, data[max_indx]))\n plt.plot(min_indx, data[min_indx], 'gs')\n show_min = '['+str(min_indx)+' '+str(data[min_indx])+']'\n plt.annotate(show_min,\n xytext=(min_indx, data[min_indx]),\n xy=(min_indx, data[min_indx]))\n plt.title('Time Delta')\n plt.xlabel('Index')\n plt.ylabel('Vibration Intensity Value')\n elif type_plot == 2: # boxplot\n boxplot(data.dropna())\n return res_quantile", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def plot_curve(self, label='', axis=None):\n controller = deepcopy(self._controller)\n data = np.zeros(len(self._ts))\n for i in range(len(self._ts)):\n controller.step(self._dt)\n data[i] = controller.sim.get_output()\n \n if axis is None:\n plt.plot(self._ts, data, label=label)\n else:\n axis.plot(self._ts, data, label=label)", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH 
Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def plot_fitter(self):\n\n total_time=self.interval*self.maxspectra\n times = np.linspace(self.interval,total_time + 1,self.interval)\n spectra_fitter.main(self.rt_plot.sum_data, times)", "def lastTick():", "def SFplot(profile, time, Xlim=[], ax=None, **kwargs):\n X = profile.X\n sf = SF(profile, time, Xlim)\n clw = {'marker': '.', 'ls': 'none'}\n if 'label' not in kwargs:\n clw['label'] = profile.name+'_%.1fh_SF' % (time/3600)\n args = {**clw, **kwargs}\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.semilogy(X, sf, **args)\n ax.set_xlabel('Mole fraction', fontsize=label_fontsize)\n ax.set_ylabel('Diffusion Coefficients '+'$(m^2/s)$', fontsize=label_fontsize)\n ax.set_xlim(plot_lim(X.min(), X.max()))\n ax.tick_params(labelsize=tick_fontsize)\n leg = ax.legend(numpoints=1, fontsize=leg_fontsize)\n leg.get_frame().set_linewidth(0.0)\n leg.set_draggable(True)\n plt.tight_layout()\n\n return ax", "def __plot(name, x, y):\n import matplotlib.pyplot as plt\n\n plt.plot(x, y)\n plt.xlabel('elements')\n plt.ylabel('time (seconds)')\n plt.savefig(\"{}\".format(name))", "def v_from_p_function(self):\r\n\r\n track_c = [] # p classical function,\r\n for i in range(len(self.dt.momentum_t)):\r\n track_c.append(self.dt.momentum_t[i] / self.dt.mass)\r\n\r\n (fig, ax) = plt.subplots()\r\n\r\n ax.plot(\r\n self.dt.momentum_t,\r\n track_c,\r\n linestyle=':',\r\n linewidth=1,\r\n color='b',\r\n label='classic',\r\n )\r\n\r\n # marker=\"+\", markersize = 13,\r\n # ax.plot(self.dt.momentum_t, self.observer.velT, linestyle=\" \",\r\n # color=\"k\",marker=\"+\", markersize = 13, label=\"measurement\")\r\n\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.vel_t,\r\n linestyle=' ',\r\n color='k',\r\n marker='o',\r\n label='result of measurements',\r\n )\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.vel_anl,\r\n linestyle='-',\r\n color='red',\r\n linewidth=1,\r\n label='continuum',\r\n )\r\n\r\n # Euler's method == analitical function. 
We not plot it.\r\n\r\n ax.plot(\r\n self.dt.momentum_t,\r\n self.dt.vn,\r\n linestyle='--',\r\n color='blue',\r\n marker='x',\r\n linewidth=1,\r\n label=\"Euler's method\",\r\n )\r\n\r\n # error\r\n\r\n ax.errorbar(self.dt.momentum_t, self.dt.vel_t, fmt='k ',\r\n yerr=self.dt.vel_t_err)\r\n\r\n xm = -1.0\r\n for i in range(len(self.dt.momentum_t)):\r\n if self.dt.momentum_t[i] > xm:\r\n xm = self.dt.momentum_t[i]\r\n stepx = round(xm / float(len(self.dt.momentum_t)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0, xm]) # xm = 0.85\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('p')\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx) # step on x is base=0.1\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # line draw\r\n\r\n line = matplotlib.lines.Line2D([0.0, 9.0], [1.0, 1.0], color='b'\r\n )\r\n ax.add_line(line)\r\n plt.text(0.7, 1.01, u'light speed', horizontalalignment='center'\r\n )\r\n ax.set_ylim([0, 1.1])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('v')\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=0.1) # step on y is base=0.1\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n # pylab.show()\r\n\r\n plt.show()", "def plot_tseries(*args, **kwargs) :\n data = kwargs.pop('data')\n return data.dropna().plot(x=args[0], y=args[1], **kwargs)", "def plot_f(self, *args, **kwargs):\r\n kwargs['plot_raw'] = True\r\n self.plot(*args, **kwargs)", "def footprint_demo(**kw):\n # Note: needs fixed slits in single_point()\n count = 1500000\n data = []\n for theta in np.linspace(0.15, 5, 30):\n n = single_point(theta=theta, count=count, trace=False, **kw)\n data.append((theta, np.sum(n.active)))\n print(data[-1])\n x, y = zip(*data)\n pylab.plot(x, np.array(y)/count)\n pylab.show()", "def on_tick(self, time):\n pass", "def plotTrace(trace):\n for t in trace:\n plt.plot(range(len(t)),t,alpha=0.5)\n plt.ylabel(\"Trace\")\n plt.xlabel(\"Step\")\n\n return", "def visualize(self, time, pred, true):\n plt.plot(time, true, label='Actual')\n plt.plot(time, pred, label='Predicted')\n plt.xlabel('Time')\n plt.ylabel('Price ($)')\n plt.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0.,\n prop={'size': 14})\n plt.show()", "def plot_timeseries(self, x, *a, **kw):\n v, t = self.timeseries(x, **kw)\n utcoffset = kw.pop('utcoffset', None)\n if utcoffset is not None: # temporary hack because plot_date seems to be ignoring tz kwarg...\n t += utcoffset/24.\n for k in ('convert', 'timeslice', 'rmnans'): \n trash = kw.pop(k, None)\n if not a: a = '-' # plot a line by default\n if 'label' not in kw: \n kw.update({'label': x.replace('platform ','').replace('_',' ')})\n if 'axes' in kw: # deal with possible bug in plot_date?\n ax = kw.pop('axes')\n ax.plot_date(t, v, *a, **kw)\n ax.set_xlim(ax.xaxis.get_data_interval()) # update time limits\n else: # just make a new axis\n plt.plot_date(t, v, *a, **kw)\n ax = plt.gca()\n plt.gcf().autofmt_xdate()\n return ax", "def plot_running_time(num_clusters):\n slow_running = []\n fast_running = []\n for dummy_i in range(2, num_clusters):\n cluster_list = gen_random_clusters(dummy_i)\n 
start = timer()\n fast_closest_pair(cluster_list)\n end = timer()\n fast_running.append((end - start))\n \n start = timer()\n slow_closest_pair(cluster_list)\n end = timer()\n slow_running.append((end - start))\n #\n plt.plot(range(2, num_clusters), fast_running)\n plt.plot(range(2, num_clusters), slow_running)\n plt.xlabel(\"num clusters\")\n plt.ylabel(\"running time in seconds\")\n plt.title(\"Running time slow closest pair vs fast closest pair.\")\n plt.legend([\"fast closest pair\", \"slow closest pair\"])\n plt.show()", "def diagrama_ts(S: [np.ndarray] or [list], T: [np.ndarray] or [list], title: str, hide_values: bool) -> (Figure, Axes):\n return diagram(S, T, title, \"TS\", hide_values)", "def time_support(*, scale=None, format=None, simplify=True):\n import matplotlib.units as units\n from matplotlib.ticker import MaxNLocator, ScalarFormatter\n\n from astropy.visualization.wcsaxes.utils import select_step_hour, select_step_scalar\n\n class AstropyTimeLocator(MaxNLocator):\n # Note: we default to AutoLocator since many time formats\n # can just use this.\n\n def __init__(self, converter, *args, **kwargs):\n kwargs[\"nbins\"] = 4\n super().__init__(*args, **kwargs)\n self._converter = converter\n\n def tick_values(self, vmin, vmax):\n # Where we put the ticks depends on the format we are using\n if self._converter.format in YMDHMS_FORMATS:\n # If we are here, we need to check what the range of values\n # is and decide how to find tick locations accordingly\n\n vrange = vmax - vmin\n\n if (\n self._converter.format != \"yday\" and vrange > 31\n ) or vrange > 366: # greater than a month\n # We need to be careful here since not all years and months have\n # the same length\n\n # Start off by converting the values from the range to\n # datetime objects, so that we can easily extract the year and\n # month.\n\n tmin = Time(\n vmin, scale=self._converter.scale, format=\"mjd\"\n ).datetime\n tmax = Time(\n vmax, scale=self._converter.scale, format=\"mjd\"\n ).datetime\n\n # Find the range of years\n ymin = tmin.year\n ymax = tmax.year\n\n if ymax > ymin + 1: # greater than a year\n # Find the step we want to use\n ystep = int(select_step_scalar(max(1, (ymax - ymin) / 3)))\n\n ymin = ystep * (ymin // ystep)\n\n # Generate the years for these steps\n times = []\n for year in range(ymin, ymax + 1, ystep):\n times.append(datetime(year=year, month=1, day=1))\n\n else: # greater than a month but less than a year\n mmin = tmin.month\n mmax = tmax.month + 12 * (ymax - ymin)\n\n mstep = int(select_step_scalar(max(1, (mmax - mmin) / 3)))\n\n mmin = mstep * max(1, mmin // mstep)\n\n # Generate the months for these steps\n times = []\n for month in range(mmin, mmax + 1, mstep):\n times.append(\n datetime(\n year=ymin + (month - 1) // 12,\n month=(month - 1) % 12 + 1,\n day=1,\n )\n )\n\n # Convert back to MJD\n values = Time(times, scale=self._converter.scale).mjd\n\n elif vrange > 1: # greater than a day\n self.set_params(steps=[1, 2, 5, 10])\n values = super().tick_values(vmin, vmax)\n\n else:\n # Determine ideal step\n dv = (vmax - vmin) / 3 * 24 << u.hourangle\n\n # And round to nearest sensible value\n dv = select_step_hour(dv).to_value(u.hourangle) / 24\n\n # Determine tick locations\n imin = np.ceil(vmin / dv)\n imax = np.floor(vmax / dv)\n values = np.arange(imin, imax + 1, dtype=np.int64) * dv\n\n else:\n values = super().tick_values(vmin, vmax)\n\n # Get rid of values outside of the input interval\n values = values[(values >= vmin) & (values <= vmax)]\n\n return values\n\n def 
__call__(self):\n vmin, vmax = self.axis.get_view_interval()\n return self.tick_values(vmin, vmax)\n\n class AstropyTimeFormatter(ScalarFormatter):\n def __init__(self, converter, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._converter = converter\n self.set_useOffset(False)\n self.set_scientific(False)\n\n def format_ticks(self, values):\n if len(values) == 0:\n return []\n if self._converter.format in YMDHMS_FORMATS:\n times = Time(values, format=\"mjd\", scale=self._converter.scale)\n formatted = getattr(times, self._converter.format)\n if self._converter.simplify:\n if self._converter.format in (\"fits\", \"iso\", \"isot\"):\n if all(x.endswith(\"00:00:00.000\") for x in formatted):\n split = \" \" if self._converter.format == \"iso\" else \"T\"\n formatted = [x.split(split)[0] for x in formatted]\n elif self._converter.format == \"yday\":\n if all(x.endswith(\":001:00:00:00.000\") for x in formatted):\n formatted = [x.split(\":\", 1)[0] for x in formatted]\n return formatted\n elif self._converter.format == \"byear_str\":\n return Time(\n values, format=\"byear\", scale=self._converter.scale\n ).byear_str\n elif self._converter.format == \"jyear_str\":\n return Time(\n values, format=\"jyear\", scale=self._converter.scale\n ).jyear_str\n else:\n return super().format_ticks(values)\n\n class MplTimeConverter(units.ConversionInterface):\n def __init__(self, scale=None, format=None, simplify=None):\n super().__init__()\n\n self.format = format\n self.scale = scale\n self.simplify = simplify\n\n # Keep track of original converter in case the context manager is\n # used in a nested way.\n self._original_converter = units.registry.get(Time)\n\n units.registry[Time] = self\n\n @property\n def format(self):\n return self._format\n\n @format.setter\n def format(self, value):\n if value in UNSUPPORTED_FORMATS:\n raise ValueError(f\"time_support does not support format={value}\")\n self._format = value\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, tb):\n if self._original_converter is None:\n del units.registry[Time]\n else:\n units.registry[Time] = self._original_converter\n\n def default_units(self, x, axis):\n if isinstance(x, tuple):\n x = x[0]\n if self.format is None:\n self.format = x.format\n if self.scale is None:\n self.scale = x.scale\n return \"astropy_time\"\n\n def convert(self, value, unit, axis):\n \"\"\"\n Convert a Time value to a scalar or array.\n \"\"\"\n scaled = getattr(value, self.scale)\n if self.format in YMDHMS_FORMATS:\n return scaled.mjd\n elif self.format == \"byear_str\":\n return scaled.byear\n elif self.format == \"jyear_str\":\n return scaled.jyear\n else:\n return getattr(scaled, self.format)\n\n def axisinfo(self, unit, axis):\n \"\"\"\n Return major and minor tick locators and formatters.\n \"\"\"\n majloc = AstropyTimeLocator(self)\n majfmt = AstropyTimeFormatter(self)\n return units.AxisInfo(\n majfmt=majfmt, majloc=majloc, label=f\"Time ({self.scale})\"\n )\n\n return MplTimeConverter(scale=scale, format=format, simplify=simplify)", "def time_series(t, f=0.02):\n T = t.size\n # Seasonal component and time-varying trend component\n ys = np.sin(2 * np.pi * f * t) * 0.6 + np.sin(1 / 5 * 2 * np.pi * f * t) * 0.2\n # Amplitude modulation component\n amp_mod = 0.5 * np.sin(1 / 6 * 2 * np.pi * f * t) + 0.8\n ys *= amp_mod\n ys = np.reshape(ys, (T,1))\n return ys", "def make_plot(x,y):", "def plot_running_time():\n global counter\n counter += 1\n running_time_targeted = []\n running_time_fast_targeted = []\n \n for 
node_number in range(10, 1000, 10):\n synthetic_undirected_graph = make_synthetic_undirected_graph(node_number, 5)\n\n start_time = time.time()\n attack_order = targeted_order(synthetic_undirected_graph)\n stop_time = time.time()\n running_time_targeted.append(stop_time - start_time)\n \n start_time = time.time()\n attack_order = fast_targeted_order(synthetic_undirected_graph)\n stop_time = time.time()\n running_time_fast_targeted.append(stop_time - start_time)\n \n plt.plot(range(10, 1000, 10), running_time_targeted, '-b', label = 'targeted_order')\n plt.plot(range(10, 1000, 10), running_time_fast_targeted, '-r', label = 'fast_targeted_order')\n \n plt.legend(loc='upper right')\n\n\n plt.title(\" plot of running time of desktop Python\")\n plt.xlabel(\"the number of nodes\")\n plt.ylabel(\"running times\")\n plt.savefig(\"running_time_\"+str(counter)+\".png\", dpi = 72)\n plt.gcf().clear() # hose-keeping", "def plot(self):\n\t\t\n\t\tfig,p1=_plt.subplots()\n\t\tp1.plot(self.time*1e3,self.qStar,label=r'q$^*$')\n\t\tp1.plot(self.time*1e3,self.qStarCorrected,label=r'q$^* * 1.15$')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel=r'q$^*$',ylim=[1,5])\n\t\t_plot.finalizeFigure(fig,title=self.title)", "def pyts_time_series(x):\n p = circle(x, r=0.2, a=0, b=0, x_lim=(-0.2, 0.2))\n y = - circle(x, r=0.2, a=0.4, b=0, x_lim=(0.2, 0.6))\n t = 0\n s = circle(x, r=0.125, a=0.9, b=-0.025, x_lim=(0.775, 0.9))\n s += 0.1 * (x >= 0.9) * (x <= 1.2)\n return p + y + t + s", "def plotPerTimeStamp(options):\n name = options['name'] + '_' + options['scan'] + '_perTime'\n if options['extra']:\n name += '_' + options['extra']\n f = openRootFileR(options['name']+'_perTime')\n histname = plotName(name, timestamp=False)\n filename = plotName(name, timestamp=True)\n filepath = plotPath(name, timestamp=True)\n print '<<< Save plot:', filepath\n hist = f.Get(histname)\n hist.SetErrorOption(options['error'])\n if options['big']:\n canvas = TCanvas('c', '', 8000, 1200)\n else:\n canvas = TCanvas('c', '', 1400, 500)\n canvas.SetLogy(options['logy'])\n gStyle.SetOptStat(options['optstat'])\n hist.Draw()\n gPad.Update()\n hist.GetXaxis().SetTimeDisplay(1)\n hist.GetXaxis().SetTimeFormat('#splitline{%d.%m.%y}{%H:%M:%S}%F1969-12-31' \\\n +' 22:00:00')\n hist.GetXaxis().SetLabelOffset(0.03)\n hist.GetXaxis().SetTitle('')\n if 'xmin' in options and 'xmax' in options:\n hist.GetXaxis().SetRangeUser(options['xmin'], options['xmax'])\n hist.GetYaxis().SetTitle(options['ytitle'])\n hist.GetYaxis().SetTitleOffset(1.2)\n for axis in [hist.GetXaxis(), hist.GetYaxis()]:\n axis.SetTitleFont(133)\n axis.SetTitleSize(16)\n axis.SetLabelFont(133)\n axis.SetLabelSize(12)\n axis.CenterTitle()\n if options['big']:\n axis.SetTickLength(0.01)\n if options['big']:\n hist.GetYaxis().SetTitleOffset(0.25)\n drawSignature(filename)\n gPad.Modified()\n gPad.Update()\n if options['retrn']:\n return [canvas, hist, f]\n else:\n canvas.Print(filepath)\n canvas.Close()\n closeRootFile(f, options['name']+'_perTime')", "def plot_v(t, v):\n p1 = plt.plot(t,v)\n plt.xlabel('Time [s]')\n plt.ylabel('Velocity [m/s]')\n plt.title('Velocity for the skydiver as a function of time')\n plt.show()\n plt.savefig('Parachute_velocity.png')", "def draw_plot(yscale='linear'):\n plt.yscale(yscale)\n plt.xticks(list(range(0, 101, 5)))\n plt.xlabel('percentile [%]')\n plt.grid(True)\n plt.ylabel('operation time [ns]')\n plt.legend()\n plt.show()", "def tick(self, dt):\n pass", "def plot(self, set_xticks_from_index=True,\n xlabel=None,\n ylabel=None,\n 
colormap='jet',\n logx=False,\n logy=None,\n grid=True,\n linewidth=2,\n rot=None,\n dpi=None,\n fontsize=14,\n specs_fontsize=None,\n tick_fontsize=None,\n label_fontsize=None,\n legend_fontsize=None,\n figsize=None,\n specs_position='left',\n debug_plotfs=False,\n pause_timefs=0.1,\n modules=None,\n use_frame=False,\n sp_argID=0,\n sp_ncols=-1,\n sp_sharey=None,\n sp_title_position='center',\n sp_title_fontsize=None,\n sp_show_specs=True,\n save=None,\n **kwargs):\n\n # Get dataframe and dtype\n dtype = self.dtype\n df = self.__df_timings\n multiindex = self.multiindex\n\n style.use('bmh' if use_frame else 'fivethirtyeight')\n\n # Setup styles\n available_linestyles = ['-.', '--', '-']\n extls = np.resize(available_linestyles, df.shape[1]).tolist()\n\n # Get plot params - logy, ylabel\n if ylabel is None:\n ylabel_map = {'timings': 'Runtime [s]', 'speedups': 'Speedup [x]', 'scaled_timings': 'Scaled Runtime [x]'}\n ylabel = ylabel_map[dtype]\n\n if logy is None:\n logy_map = {'timings': True, 'speedups': False, 'scaled_timings': False}\n logy = logy_map[dtype]\n\n groupings_done = False\n if multiindex:\n # Get groupings\n groupings_done, ncols, out = _get_groupings(df, sp_argID)\n if not groupings_done:\n warnings.warn('Groupings are not possible. Hence, resorting to normal plot.', stacklevel=2)\n\n if not groupings_done: # normal plot\n is_xticks_number, xticks = _getxticks(df, set_xticks_from_index=set_xticks_from_index)\n\n if not is_xticks_number and logx:\n xticks = None\n warnings.warn('Some xticks might be not be seen.', stacklevel=2)\n\n tick_fontsize = _assign_mplibrcparams(fontsize, specs_fontsize, tick_fontsize,\n label_fontsize, legend_fontsize, specs_position, dpi)\n\n # Plot using dataframe data and its attributes\n ax = df.plot(style=extls,\n colormap=_truncate_cmap(colormap),\n title=specs(modules=modules),\n rot=rot,\n fontsize=tick_fontsize,\n linewidth=linewidth,\n logx=logx,\n logy=logy,\n figsize=figsize,\n xticks=xticks,\n **kwargs)\n\n if grid:\n ax.grid(True, which=\"both\", ls=\"-\")\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n if set_xticks_from_index and is_xticks_number:\n ax.set_xticklabels(df.index)\n if not is_xticks_number: # earlier set_xticks_from_index or not is_xticks_number\n ax.set_xticks([], minor=True)\n\n # Show in fullscreen\n fullscreenfig(ax, pause_timefs, print_info=debug_plotfs)\n ax_fig = ax.figure\n else: # subplot\n _SUBPLOT_FIGSIZE = (6.4, 3.6)\n\n if figsize is None:\n figsize = _SUBPLOT_FIGSIZE\n\n if sp_ncols == -1:\n sp_ncols = ncols\n\n tick_fontsize, specs_fontsize = _assign_mplibrcparams_for_subplot(fontsize, label_fontsize, sp_title_fontsize,\n legend_fontsize, tick_fontsize, sp_title_position,\n specs_fontsize, dpi)\n\n len1 = len(out)\n nrows = int(np.ceil(len1 / float(sp_ncols)))\n r, c = np.unravel_index(np.arange(len1), (nrows, sp_ncols))\n\n map_dict = {None: False, 'row': 'row', 'r': 'row', 'global': True, 'g': True}\n sharey_val = _mapvals(sp_sharey, map_dict, name='subplot_sharey')\n\n # Setup title str for each subplot\n title_str = _subplot_title_str(df, sp_argID)\n\n df0 = out[list(out.keys())[0]]\n is_xticks_number, xticks = _getxticks(df0, set_xticks_from_index=set_xticks_from_index)\n\n if not is_xticks_number and logx:\n xticks = None\n warnings.warn('Some xticks might be not be seen.', stacklevel=2)\n\n figS = (sp_ncols * figsize[0], nrows * figsize[1])\n fig, axs = plt.subplots(nrows, sp_ncols, sharex='col', constrained_layout=True, sharey=sharey_val, figsize=figS)\n axs = 
axs.reshape(-1, sp_ncols)\n\n for i, (k, df0) in enumerate(out.items()):\n ri, ci = r[i], c[i]\n ax = axs[ri, ci]\n is_first_subplot = (ri == 0) and (ci == 0)\n\n subplot_title = str(k)\n if is_first_subplot:\n subplot_title = title_str + subplot_title\n\n df0.plot(ax=ax,\n style=extls,\n linewidth=linewidth,\n colormap=_truncate_cmap(colormap),\n xticks=xticks,\n logx=logx,\n logy=logy,\n rot=rot,\n title=subplot_title,\n fontsize=tick_fontsize,\n legend=i == 0,\n **kwargs)\n\n if grid:\n ax.grid(True, which=\"both\", ls=\"-\")\n\n # Next two IFs are needed for logx=True plots\n if set_xticks_from_index and is_xticks_number:\n ax.set_xticklabels(df0.index)\n if not is_xticks_number: # earlier set_xticks_from_index or not is_xticks_number\n ax.set_xticks([], minor=True)\n\n # Set ylabel on middle row ID\n axs[nrows // 2, 0].set_ylabel(ylabel)\n\n if sp_show_specs:\n fig.suptitle(specs_short(), fontsize=specs_fontsize)\n\n # remove unused axes\n xlabel = df.index.names[sp_argID]\n r, c = np.unravel_index(np.arange(len1, axs.size), (nrows, sp_ncols))\n for (i, j) in zip(r, c):\n ax = axs[i, j]\n ax.set_xlabel(xlabel)\n ax.tick_params(labelrotation=rot)\n ax.yaxis.set_visible(False)\n ax.patch.set_visible(False)\n plt.setp(ax.spines.values(), visible=False)\n ax_fig = fig\n\n # Save figure\n if save is not None:\n ax_fig.savefig(save, bbox_inches='tight')\n return", "def show_events(ticks):\n for tick in i_prune_ticks(ticks):\n print(tick)", "def stft_plot(x: np.ndarray, *args, **kwargs) -> go.Figure:\n\n # calculate stft\n f, t, x_stft = signal.stft(x, *args, **kwargs)\n\n # compensate for scaling factor used in scipy's stft\n if \"window\" in kwargs.keys():\n if isinstance(kwargs[\"window\"], str):\n window = signal.get_window(kwargs[\"window\"], kwargs[\"nperseg\"])\n else:\n window = kwargs[\"window\"]\n factor = window.sum()\n else:\n factor = kwargs[\"nperseg\"]\n x_stft *= factor\n\n # plot\n power = 20 * np.log10(np.abs(x_stft))\n fig = px.imshow(power, x=t, y=f, aspect=\"auto\", origin=\"lower\")\n fig.update_yaxes(title=\"Frequency\")\n fig.update_xaxes(title=\"Time\")\n fig.update_layout(coloraxis_colorbar=dict(title=\"[dB]\"))\n fig.show()\n\n return fig", "def plotting(x,y,name,variable,unit,label_name=\"Simulation\",title=None,mins=False):\n ax = plt.figure(str(name))\n # ax.legend(\"best\")\n\n if mins:\n x/=60 #change time to mins from secs\n plt.xlabel(\"t [min]\")\n else:\n plt.xlabel(\"t [s]\")\n\n if title!= None:\n plt.title(str(title))\n\n plt.plot(x-x[0],y,label=label_name)\n\n\n lab = str(str(variable)+\" \"+\"[\"+unit+\"]\")\n plt.legend(loc='best')\n plt.ylabel(lab)\n plt.grid(True)\n # plt.savefig(title)\n plt.show()\n\n return ax", "def visualize_time_series(fig_ax, data, inp_color, missing_data, lag_color, first_date,\n x_label=\"Number of Days\", y_label=\"Log of Aluminium Price\", title=\"Prices over time\"):\n fig, ax = fig_ax\n ((x_train_raw, y_train_raw), y_pred_list) = data\n\n missing_x, missing_y = missing_data\n is_missing = len(missing_x) != 0\n\n first_date = datetime.strptime(first_date, '%Y-%m-%d')\n\n convert_date = lambda x: [\n np.datetime64((first_date + timedelta(days=d)).strftime('%Y-%m-%d'))\n for d in x\n ]\n convert_price = lambda x: x[\"Output\"].to_list()\n\n x_train = convert_date(x_train_raw[\"Date\"].to_list())\n y_train = convert_price(y_train_raw)\n \n cut_point = x_train[-1]\n ax.plot(x_train, y_train, color=color[inp_color])\n\n for i, y_pred in enumerate(y_pred_list):\n data, plot_name, color_code, is_bridge = y_pred\n mean_pred, 
x_test_raw = data[\"mean\"], data[\"x\"]\n x_test = convert_date(x_test_raw)\n\n if i == 0 and is_missing:\n missing_x = convert_date(missing_x)\n ax.axvline(x_test[0], color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n ax.plot([missing_x[-1], x_test[0]], [missing_y[-1], mean_pred[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvspan(cut_point, x_test[0], color=color[lag_color], alpha=0.1)\n\n plot_bound(ax, data, x_test, color[color_code], plot_name)\n\n if is_bridge and (not is_missing): \n ax.plot([x_train[-1], x_test[0]], [y_train[-1], mean_pred[0]], color[color_code], linewidth=1.5)\n\n if is_missing:\n ax.plot(missing_x, missing_y, color=color[lag_color], linestyle=\"dashed\")\n ax.plot([x_train[-1], missing_x[0]], [y_train[-1], missing_y[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvline(cut_point, color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n else:\n ax.axvline(cut_point, color=color[\"k\"], linestyle='--')\n\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.legend()\n\n # ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n\n # ax.set_xlim(left=cut_point-np.timedelta64(1, 'm'))\n plot_axis_date(ax, x_train + missing_x + x_test)\n ax.grid()\n return fig, ax", "def plot2dTimeSeries(values, title='series', xLabel='time', yLabel='values', savePath='.'):\n plt.plot(values)\n plt.ylabel(yLabel)\n plt.xlabel(xLabel)\n plt.xticks(np.linspace(0, len(values), 11))\n plt.title(title)\n plt.savefig(f'{savePath}/{title}.png')\n plt.show(block=False)\n plt.pause(2)\n plt.close()", "def paper_plot_tstats_times(self, tstat, indexes, time=None, freq=None, params = None, er = None, domain = None, yrange = None, label= None, lande=None,\n xi_range=None, s_range=None, pred=None, compare=None, pos =None):\n\n if pos is None:\n pos = 'both'\n\n multiply_se_by = 1.96\n\n if label is None:\n label = tstat\n\n no_undertext = False\n no_legend = False\n if tstat == 'd2ax_per_mut_input':\n no_legend = True\n\n possible_compare_stats = ['d2ax_scaled_per_mut_input']\n\n if time is None:\n time = False\n\n if compare is None:\n compare = False\n if pos != 'both':\n compare = False\n\n if er is None:\n er = True\n if freq is None:\n freq = False\n if lande is None:\n lande = False\n\n if freq:\n loglinear = False\n else:\n loglinear = True\n\n if s_range is None:\n sr = False\n else:\n sr = True\n if xi_range is None:\n xir = False\n else:\n xir = True\n if pred is None:\n pred = False\n legend_with_dclass_param = False\n if params is not None:\n legend_with_dclass_param = True\n\n data_classes = [self.data_classes[indi] for indi in indexes]\n\n if lande is None:\n if data_classes[0]._THERE_ARE_FINAL_TRAJ_NON_LANDE:\n lande = False\n else:\n lande = True\n\n only_lande = False\n for dc in data_classes:\n if 'U' not in dc.param_dict:\n only_lande = True\n lande = True\n #if there are no nonlande trajectories then we must use the Lande ones\n if not data_classes[0]._THERE_ARE_FINAL_TRAJ_NON_LANDE:\n only_lande = True\n lande = True\n\n if only_lande:\n undertext_params = [['N', 'Vs'], ['shift_s0', 'sigma_0_del']]\n else:\n undertext_params = [['N', 'U'], ['shift_s0', 'sigma_0_del'], ['E2Ns', 'V2Ns']]\n\n if tstat not in data_classes[0]._tstats:\n print(str(tstat) + ' not in dataclass ')\n return\n\n if freq or tstat not in possible_compare_stats:# or len(data_classes) >1:\n compare = False\n\n\n plot_dict = dict()\n plotspecs = dict()\n plotspecs['legend_anchor'] = 'upper left'\n plotspecs['legend_loc'] = (1.02, 
1.03)\n plot_dict['savedir'] = self.base_dir\n plotspecs['fsize'] = (28, 16)\n plotspecs['dpi'] = 200\n plotspecs['linewidth'] = 1.5\n plotspecs['ticksize'] = 30\n plotspecs['legend_font'] = {'size': '54'}\n plotspecs['axis_font'] = {'fontname': 'Arial', 'size': '28'}\n plot_dict['linestyles'] = ['-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-']\n plotspecs['marker_size'] = 15\n plotspecs['cap_size'] = 20\n #plotspecs['nxticks'] = 2\n plotspecs['undertext_font'] = {'color': 'black', 'weight': 'roman', 'size': '16'}\n extra_text = ''\n\n #print groupings\n\n # plotspecs['fsize'] = (17, 11)\n # plotspecs['legend_anchor'] = 'upper right'\n # plotspecs['legend_loc'] = (0.98, 0.98)\n # plotspecs['legend_anchor'] = 'upper right'\n # if tstat == 'mean_freq':\n # plotspecs['legend_loc'] = (0.98,0.98)\n # elif tstat == 'mean_adx':\n # plotspecs['legend_loc'] = (0.98, 0.45)\n #\n # plotspecs['ticksize'] = 35\n # plotspecs['linewidth'] = 5\n # plotspecs['marker_size'] = 20\n # #plotspecs['nyticks'] = 2\n # plotspecs['axis_font'] = {'fontname': 'Arial', 'size': '44'}\n # plotspecs['legend_font'] = {'size': '38'}\n\n\n if not no_undertext and len(data_classes)==1:\n undertext = []\n #str(int(data_class.number_population_simulations()))\n if not lande:\n number_runs_string = \"Obtained from \" + str(int(data_classes[0].number_allele_pairs(False,final=False))) + \\\n \" allele traj's using NonLande D(t) averaged over \" \\\n +str(int(data_classes[0].number_population_simulations()))\\\n + \" population sims with parameters:\"\n elif lande and only_lande:\n number_runs_string = \"Obtained from \" + str(int(data_classes[0].number_allele_pairs(True, final=False))) + \\\n \" allele traj's for each effect size, with Lande D(t) and parameters:\"\n elif lande and not only_lande:\n number_runs_string1st = \"NonLande obtained from \" + str(int(data_classes[0].number_allele_pairs(False,final=False))) + \\\n \" allele traj's with NonLande D(t) averaged over \" \\\n +str(int(data_classes[0].number_population_simulations()))+ \" population sims\"\n undertext.append(number_runs_string1st)\n number_runs_string = \"Lande obtained from \" + str(int(data_classes[0].number_allele_pairs(True, final=False))) + \\\n \" allele traj's for each effect size, with corresponding Lande D(t)\"\n undertext.append(number_runs_string)\n for listi in undertext_params:\n text_list = self._plot_text(index_list=[indexes[0]],params_list=listi)\n if text_list:\n text_string = ', '.join(text_list)\n undertext.append(text_string )\n plot_dict['undertext'] = undertext#data_class.plot_text()\n\n if not no_legend:\n if len(data_classes) ==1:\n if compare:\n if tstat == 'd2ax_scaled_per_mut_input':\n anal_name = r\"$(1-D_{L}($\"+str(int(time))+ r\"$)/\\Lambda)\\cdot v(a)$\"\n else:\n anal_name ='Analytic'\n plot_dict['ynames'] = ['Simulations', anal_name]\n if lande and not only_lande:\n plot_dict['ynames'] = ['NonLande', 'lande']\n if compare:\n plot_dict['ynames'] = ['NonLande', 'lande','Analytic']\n else:\n if lande and not only_lande:\n plot_dict['groupings_labels_within'] = ['NonLande', 'lande']\n if compare:\n plot_dict['groupings_labels_within'] = ['NonLande', 'lande','Analytic']\n if compare:\n plot_dict['groupings_labels_within'] = ['Simulations', 'Analytic']\n\n if freq:\n if pred:\n plot_dict['xlabel'] = r'$x_i $ percentile'\n else:\n plot_dict['xlabel'] = r'$x_i$'\n else:\n plot_dict['xlabel'] = \"Effect size squared (\" + r\"$S=a^2$\" + ')'\n if pred:\n if not legend_with_dclass_param:\n plot_dict['legend_title'] 
= 'Percentile of \\n initial frequency_pos'\n\n\n plot_dict['savedir'] = os.path.join(plot_dict['savedir'],'tstats_times')\n if pred:\n plot_dict['savedir'] = os.path.join(plot_dict['savedir'],'pred')\n if freq:\n plot_dict['savedir'] = os.path.join(plot_dict['savedir'],'S_sorted')\n else:\n plot_dict['savedir'] = os.path.join(plot_dict['savedir'], 'XI_sorted')\n\n savedir = os.path.join(self.base_dir, 'tstats_times')\n\n if pred:\n savedir = os.path.join(savedir, 'pred')\n if freq:\n savedir = os.path.join(savedir, 'S_sorted')\n else:\n savedir = os.path.join(savedir, 'XI_sorted')\n\n if pos == 'both':\n label = 'dpn_' + label\n savedir = os.path.join(savedir, 'combined')\n elif pos == 'pos':\n label ='p_' + label\n savedir = os.path.join(savedir,'positives')\n else:\n label = 'n_' + label\n savedir = os.path.join(savedir, 'negatives')\n\n\n if domain is None:\n if not freq:\n epsl = 0.05\n epsh =2\n domain = [0.1-epsl,100+epsh ]\n\n plot_dict['domain']= domain\n plot_dict['yrange'] = yrange\n\n at_time_text = ' at generation '\n\n if time:\n at_time_text += str(time)\n else:\n time = data_classes[0].get_phase_two_time()\n\n plot_dict['ylabel'] = self.name_class.yname(tstat)\n\n if tstat == 'd2ax_per_mut_input':\n if pos == 'both':\n plot_dict['ylabel'] = 'Contribution to change in mean\\n phenotype '\n if data_classes[0].units_s0:\n plot_dict['ylabel'] += r' (units trait SD)'\n else:\n plot_dict['ylabel'] += r' (units $\\omega/\\sqrt{2N}$)'\n plot_dict['ylabel']+= ' per\\n unit mutational input'\n elif tstat == 'x_per_seg_var' or tstat == 'x_per_seg_var':\n #plot_dict['ylabel'] = 'Average increased frequency of\\n aligned alleles'\n if pos == 'both':\n plot_dict['ylabel'] = 'Increased frequency of aligned\\n alleles per seg variant'\n else:\n plot_dict['ylabel'] = 'Average allele frequency'\n\n\n plot_dict['marker'] = True\n if compare and (not lande or only_lande):\n if len(data_classes) == 1:\n plot_dict['colors'] = ['deepskyblue','black']\n\n\n ts_string = '_' +tstat\n _mylabel = ts_string\n\n\n less = False\n\n x = []\n y = []\n yer = []\n ynames = []\n\n #maybe\n x_other = []\n y_other = []\n yer_other = []\n ynames_other = []\n\n\n\n tstati = tstat\n\n\n for data_class in data_classes:\n\n\n if 'var_0' in data_class.param_dict:\n var_0 = data_class.param_dict['var_0']\n else:\n var_0 = data_class.param_dict['sigma_0'] ** 2\n N = data_class.param_dict['N']\n Vs = data_class.param_dict['Vs']\n var_0_delta_square = var_0 * 2 * N / float(Vs)\n sig_0_del = np.sqrt(var_0_delta_square)\n\n if 'shift_s0' in data_class.param_dict:\n D_sig_0 = data_class.param_dict['shift_s0']\n D_del = sig_0_del * D_sig_0\n\n x_1 = defaultdict(list)\n y_1 = defaultdict(list)\n yer_1 = defaultdict(list)\n ynames_1 = dict()\n\n x_theory_1 = []\n y_theory_1 = []\n\n if lande and not only_lande:\n x_1_other = defaultdict(list)\n y_1_other = defaultdict(list)\n yer_1_other = defaultdict(list)\n ynames_1_other = dict()\n\n\n triples = deepcopy(data_class.tuples)\n if xir:\n remov = []\n xilow = xi_range[0]\n xihigh = xi_range[1]\n for trip in triples:\n if pred:\n XPI = data_class.xpercentile_dict[trip]\n else:\n XPI = trip[1]\n if XPI < xilow or XPI > xihigh:\n remov.append(trip)\n for trip in remov:\n triples.remove(trip)\n if sr:\n #print 's_range', s_range\n remov = []\n slow = s_range[0]\n shigh = s_range[1]\n for trip in triples:\n s = trip[0]\n if s < slow or s > shigh:\n remov.append(trip)\n for trip in remov:\n triples.remove(trip)\n\n name = ''\n lib = ''\n if params is None:\n # if len(data_classes) > 
1:\n # name += str(data_class.index)\n lib += str(data_class.index)\n else:\n for param in params:\n try:\n name += param + ' = ' + '{0:.2f}'.format(data_class.param_dict[param]) + ' '\n lib += param + '{0:.0f}'.format(data_class.param_dict[param]) + '_'\n except KeyError:\n print('KeyError: ' + param)\n\n\n stat_dict = data_class.read_tstats(tuples=triples, requiredstats=[tstati],lande=only_lande)\n\n if lande and not only_lande:\n stat_dict_other = data_class.read_tstats(tuples=triples, requiredstats=[tstati], lande=True)\n print('lande stat dict')\n\n\n for triple in triples:\n\n name_1 = ''\n\n times = sorted(stat_dict[triple][tstati].keys())\n if time:\n time_real = find_nearest(times,time)\n\n y_val_2 = stat_dict[triple][tstati][time_real][pos]['mean']\n y_val_er_2 = stat_dict[triple][tstati][time_real][pos]['se']\n if y_val_2 == 0.0:\n print('yval is zero for S ', triple[0])\n if lande and not only_lande:\n y_val_2_other = stat_dict_other[triple][tstati][time_real][pos]['mean']\n y_val_er_2_other = stat_dict_other[triple][tstati][time_real][pos]['se']\n XI = triple[1]\n if XI < 0:\n less = True\n XI = -XI\n else:\n less = False\n S = triple[0]\n if freq:\n lenformat = 1\n if pred and not less:\n val = data_class.xpercentile_dict[triple]\n else:\n val = XI\n # if root_s:\n # name_1 = r'$\\sqrt{S} =$ '\n # else:\n # name_1 = r'$S =$ '\n if legend_with_dclass_param:\n key = data_class.param_dict[params[0]]\n else:\n key = S\n else:\n #key =XI\n val = S\n lenformat = 3\n if xir:\n if xi_range[1] < 1:\n lenformat = 2\n if pred:\n key = data_class.xpercentile_dict[triple]\n if key < 0.91:\n key = round(key,1)\n elif key <0.991:\n key = round(key,2)\n else:\n key = round(key,3)\n if key < 0:\n if legend_with_dclass_param:\n key = data_class.param_dict[params[0]]\n else:\n key = -key\n # name_1 = ''\n else:\n if legend_with_dclass_param:\n key = data_class.param_dict[params[0]]\n else:\n key = XI\n # name_1 = ''\n if less:\n lenformat =0\n x_1[key].append(val)\n y_1[key].append(y_val_2)\n\n if lande and not only_lande:\n x_1_other[key].append(val)\n y_1_other[key].append(y_val_2_other)\n\n if er:\n yer_1[key].append(y_val_er_2*multiply_se_by)\n if lande and not only_lande:\n yer_1_other[key].append(y_val_er_2_other*multiply_se_by)\n if legend_with_dclass_param:\n name_1 += self.name_class.param_text(value=key, digits=2)\n else:\n name_1 += self.name_class.param_text(value=key, digits=lenformat) + ' ' + name\n\n\n name_2_lande = 'lande: '+ name_1\n\n if name_1 not in ynames_1:\n ynames_1[key] = name_1\n if lande and not only_lande:\n if name_2_lande not in ynames_1_other:\n ynames_1_other[key] = name_2_lande\n\n x.append(x_1)\n y.append(y_1)\n yer.append(yer_1)\n ynames.append(ynames_1)\n if lande and not only_lande:\n x_other.append(x_1_other)\n y_other.append(y_1_other)\n yer_other.append(yer_1_other)\n ynames_other.append(ynames_1_other)\n\n time_string = \"generation_\" + str(time_real)\n savedir = os.path.join(savedir, time_string)\n plot_dict['savedir'] = savedir\n\n if compare:\n kkey = list(x_1.keys())[0]\n smin = min(x_1[kkey])\n smax = max(x_1[kkey])\n xtheorylowers = [xx for xx in np.linspace(smin, smin+1, 50)]\n xtheoryhighers = [xx for xx in np.linspace(smin+1, smax, 100)]\n x_theory = xtheorylowers+xtheoryhighers[1:]\n s_theory = [ssi for ssi in x_theory]\n y_theory = [0 for _ in x_theory]\n if tstat == 'd2ax_scaled_per_mut_input':\n frac_integral_lande_dt = 1.0 - data_class._get_lande_dist_over_shift_at_time(time)\n if pos == 'both':\n y_theory = 
[frac_integral_lande_dt*myv(np.sqrt(ss)) for ss\n in s_theory]\n\n x_theory_1.append(x_theory)\n y_theory_1.append(y_theory)\n\n _mylabel = _mylabel + lib\n\n if len(data_classes)>1:\n if less:\n if not freq:\n plot_dict['legend_title'] = r'Shift (units $\\sigma_0$)'\n if legend_with_dclass_param:\n plot_dict['legend_title'] = self.name_class.param_text(name=params[0])\n\n\n x_2 =[]\n y_2 =[]\n yer_2 = []\n ynames_2 = []\n jj = 0\n groupings = []\n keys_list = []\n # for k in xrange(len(x)):\n # keys_list+=x[k].keys()\n # keys_list = list(set(keys_list))\n\n for key in sorted(x[0]):\n seti = set()\n for k in range(len(x)):\n if er:\n zipi = list(zip(*sorted(zip(x[k][key], y[k][key], yer[k][key]))))\n x_temp = [xi for xi in zipi[0]]\n y_temp = [yi for yi in zipi[1]]\n yer_temp = zipi[2]\n yer_2.append(yer_temp)\n else:\n zipi = list(zip(*sorted(zip(x[k][key], y[k][key]))))\n x_temp = [xi for xi in zipi[0]]\n y_temp = [yi for yi in zipi[1]]\n x_2.append(x_temp)\n y_2.append(y_temp)\n ynames_2.append(ynames[k][key])\n seti.add(jj)\n jj += 1\n\n if lande and not only_lande:\n if er:\n zipi = list(zip(*sorted(zip(x_other[k][key], y_other[k][key], yer_other[k][key]))))\n x_temp = [xi for xi in zipi[0]]\n y_temp = [yi for yi in zipi[1]]\n yer_temp = zipi[2]\n yer_2.append(yer_temp)\n else:\n zipi = list(zip(*sorted(zip(x_other[k][key], y_other[k][key]))))\n x_temp = [xi for xi in zipi[0]]\n y_temp = [yi for yi in zipi[1]]\n x_2.append(x_temp)\n y_2.append(y_temp)\n ynames_2.append(ynames_other[k][key])\n seti.add(jj)\n jj += 1\n\n if compare:\n if x_theory_1:\n x_2.append(x_theory_1[k])\n y_2.append(y_theory_1[k])\n seti.add(jj)\n jj += 1\n if er:\n yer_2.append([0 for _ in x_theory_1[k]])\n\n groupings.append(seti)\n\n\n plot_dict['x'] = x_2\n plot_dict['y'] = y_2\n if er:\n plot_dict['yer'] = yer_2\n\n #experimenting\n # if len(data_classes) >1:\n # plot_dict['groupings'] = groupings\n plot_dict['groupings'] = groupings\n\n\n linestyles =['','-']\n markerstyles = ['o','']\n if lande and not only_lande:\n markerstyles = ['o', '*','-']\n linestyles = ['','','']\n if compare:\n linestyles = ['','-','']\n markerstyles =['o','','*']\n if lande and not only_lande:\n linestyles = ['', '-','--']\n markerstyles = ['o', '','']\n\n plot_dict['linestyles'] = linestyles #+linestyles+linestyles+linestyles\n plot_dict['markerstyles'] = markerstyles #+markerstyles+markerstyles+markerstyles\n\n size_group = len(groupings[0])\n if label is None:\n if len(_mylabel) <30:\n plot_dict['label'] = _mylabel +'_many_cl'\n else:\n plot_dict['label'] = tstat\n else:\n plot_dict['label'] = label\n\n if freq:\n plot_dict['label']+= '_xi_x'\n\n\n if pred:\n plot_dict['label'] += '_pred'\n\n if compare:\n plot_dict['label'] += '_comp'\n\n\n\n if time:\n plot_dict['label'] += '_t_' + str(time_real)\n\n\n #List with [text_top, text_bottom] containing relevant parameters\n # if len(data_classes) == 1:\n # if extra_text:\n # undertext.append(extra_text)\n #\n # if not no_undertext:\n # plot_dict['undertext'] = undertext\n\n if len(data_classes)>1:\n plot_dict['ynames'] = ynames_2\n\n if loglinear:\n plotspecs['xlog'] = True\n\n\n plot_dict['plotspecs'] = plotspecs\n\n plot_many_y(**plot_dict)", "def get_timescale_lines(ax, td):\n\t\n\tneur_N = [1e6, 1e4, 1e2, 1e0, 1e-2]\n\tneur_clabel = [r'$\\tau_{\\nu}=10^6$',r'$10^4$',r'$10^2$',r'$10^0$',r'$10^{-2}$']\n\n\tfor i in range(len(neur_N)):\n\t\tcs = ax.contour(td.rho_p, td.T_p, td.data_p[\"tau_neu\"], [neur_N[i]], colors='b', linestyles='dotted')\n\t\tax.clabel(cs, fontsize=14, 
inline=1, fmt=neur_clabel[i])\t#inline_spacing = -50\n\n\tnucr_N = [1e6, 1e4, 1e2, 1e0, 1e-2,1e-4,1e-6]\n\tnucr_clabel = [r'$\\tau_\\mathrm{cc}=10^6$',r'$10^4$',r'$10^2$',r'$10^0$',r'$10^{-2}$',r'$10^{-4}$',r'$10^{-6}$']\n\n\tfor i in range(len(nucr_N)):\n\t\tcs = ax.contour(td.rho_p, td.T_p, td.data_p[\"tau_nuc\"], [nucr_N[i]], colors='r', linestyles=':')\n\t\tax.clabel(cs, fontsize=14, inline=1, fmt=nucr_clabel[i])\n\n\tS_N = [1e8, 10**8.1, 10**8.2]\n\tS_clabel = [r'$s = 10^8$',r'$10^{8.1}$',r'$10^{8.2}$']\n\n\tfor i in range(len(S_N)):\n\t\tcs = ax.contour(td.rho_p, td.T_p, td.data_p[\"S\"], [S_N[i]], colors='g', linestyles='dotted', linewidth=4)\n\t\tax.clabel(cs, fontsize=14, inline=1, fmt=S_clabel[i])\n\n\tcs = ax.contour(td.rho_p, td.T_p, td.data_p[\"tau_eq_neunuc\"], [0], colors='m', linestyles='-', linewidth=6)\n\tax.clabel(cs, fontsize=14, inline=1, fmt=r'$\\tau_\\mathrm{cc}=\\tau_{\\nu}$')\n\n\tcs = ax.contour(td.rho_p, td.T_p, td.data_p[\"Prat\"], [2.0], colors='k', linestyles='--', linewidth=6)\n\tax.clabel(cs, fontsize=14, inline=1, fmt=r'$P = 2P(T=0)$')\n\n\tcs = ax.contour(td.rho_p, td.T_p, td.data_p[\"tau_eq_cdnuc\"], [0], colors='r', linestyles=':', linewidth=4)\n\tax.clabel(cs, fontsize=14, inline=1, fmt=r'$\\tau_\\mathrm{cc}=(G\\rho_c)^{-1/2}$')\n\n\tcs = ax.contour(td.rho_p, td.T_p, td.data_p[\"tau_eq_dynnuc\"], [0], colors=\"r\", linestyles='-', linewidths=2)\n\tax.clabel(cs, fontsize=14, inline=1, fmt=r\"$\\tau_\\mathrm{cc} = \\tau_\\mathrm{dyn}$\")", "def coffee_plot(times, Temps, axes, legends=[None, None], split=2):\n plot_model(times[split:], Temps[split:], axes[0], legends[0])\n plot_samples(times[:split], Temps[:split], axes[1], legends[1])", "def plot(self, title=None, xmin=None, xmax=None, ymin=None, ymax=None,\n ax=None, show=True, color=None, xlabel=True, ylabel=True,\n legend=True, chance=True, label='Classif. score'):\n # XXX JRK: need cleanup in viz\n self._prep_times()\n fig = plot_gat_times(self, train_time='diagonal', title=title,\n xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, ax=ax,\n show=show, color=color, xlabel=xlabel,\n ylabel=ylabel, legend=legend, chance=chance,\n label=label)\n self._clean_times()\n return fig", "def plot_frequency(self):\n canvas = xboa.common.make_root_canvas(\"frequency vs time\")\n canvas.Draw()\n freq_list = [freq for freq in self.freq_list]\n hist, graph = xboa.common.make_root_graph(\"frequency vs time\",\n self.time_list, \"time [ns]\",\n freq_list, \"f [GHz]\")\n hist.Draw()\n graph.Draw(\"sameL\")\n fit = ROOT.TF1(\"fit\", \"pol4\", 0, 20*1e6)\n fit.FixParameter(0, freq_list[0])\n graph.Fit(fit)\n canvas.Update()", "def showCl(ell,temps,title='CAMB ISWout power spectrum'):\n plt.plot(ell,temps*ell*(ell+1)/(2*np.pi) *1e12) #1e12 to convert to microK**2\n plt.xlabel('multipole moment l')\n plt.ylabel('l(l+1)C_l/(2pi) [microK**2]')\n plt.title(title)\n plt.show()", "def plot_ins_state(time, state):\n pylab.ion()\n\n plot_trajectory(state[:,0], state[:,1], state[:,2])\n\n\n # Plot position vs. 
time\n\n\n pylab.figure()\n pylab.subplot(311)\n pylab.plot(time, state[:,0],'r')\n pylab.xlabel('time (s)')\n pylab.ylabel('$\\\\phi$, rad')\n pylab.title('Latitude')\n pylab.grid(True)\n\n pylab.subplot(312)\n pylab.plot(time, state[:,1],'g')\n pylab.xlabel('time (s)')\n pylab.ylabel('$\\\\lambda$, rad')\n pylab.title('Longitude')\n pylab.grid(True)\n\n pylab.subplot(313)\n pylab.plot(time, state[:,2],'b')\n pylab.xlabel('time, s')\n pylab.ylabel('$h$, m')\n pylab.title('Altitude')\n pylab.grid(True)\n pylab.show()\n\n\n # Plot velocity vs. time\n pylab.figure()\n pylab.plot(time, state[:,3:6])\n pylab.xlabel('time, s')\n pylab.ylabel('Vn, Ve, Vd')\n pylab.title('Velocity vs. time')\n\n pylab.grid(True)\n pylab.show()\n\n # Plot acceleration vs. time\n pylab.figure()\n pylab.plot(time, state[:,6:9])\n pylab.xlabel('time, s')\n pylab.ylabel('an, ae, ad')\n pylab.title('Acceleration vs. time')\n\n pylab.grid(True)\n pylab.show()\n pylab.ioff()\n\n # Plot quaternions vs. time\n pylab.figure()\n pylab.plot(time, state[:,9:])\n pylab.xlabel('time, s')\n pylab.ylabel('q0, q1, q2, q3')\n pylab.title('Quaternion vs. time')\n\n pylab.grid(True)\n pylab.show()\n pylab.ioff()", "def plot(t): \n assert isinstance(t, int), \"'t' argument should be an integer.\"\n assert t > 0, \"'t' argument should be a positive integer.\" \n # Initialize arrays with zeros to store mean cumulative rewards upto t \n # rounds for each of the three implemented bandit algorithms\n EpsGreedy_rewards = np.zeros(t)\n UCB_rewards = np.zeros(t)\n LinUCB_rewards = np.zeros(t)\n # For each round, store the mean cumulative rewards upto that round\n for i in range(1,t):\n EpsGreedy_rewards[i] = np.sum(results_EpsGreedy[0:i]) / t\n UCB_rewards[i] = np.sum(results_UCB[0:i]) / t\n LinUCB_rewards[i] = np.sum(results_LinUCB[0:i]) / t\n # Plot running per round cumulative reward\n plt.plot(range(0,t), EpsGreedy_rewards, color='b', label='e-Greedy')\n plt.plot(range(0,t), UCB_rewards, color='g', label='UCB')\n plt.plot(range(0,t), LinUCB_rewards, color='orange', label='LinUCB')\n plt.xlabel('Round')\n plt.ylabel('Mean Cumulative Reward')\n plt.title('Running Per Round Cumulative Reward')\n plt.legend()\n plt.show()", "def plot(self, *args, **kwargs):\n pass", "def plot_credible_intervals_for_time(\n histories: Union[List[History], History],\n labels: Union[List[str], str] = None,\n ms: Union[List[int], int] = None,\n ts: Union[List[int], int] = None,\n par_names: List[str] = None,\n levels: List[float] = None,\n show_mean: bool = False,\n show_kde_max: bool = False,\n show_kde_max_1d: bool = False,\n size: tuple = None,\n rotation: int = 0,\n refvals: Union[List[dict], dict] = None,\n kde: Transition = None,\n kde_1d: Transition = None,\n):\n histories = to_lists(histories)\n labels = get_labels(labels, len(histories))\n n_run = len(histories)\n if ms is None:\n ms = [0] * n_run\n elif not isinstance(ms, list) or len(ms) == 1:\n ms = [ms] * n_run\n if levels is None:\n levels = [0.95]\n levels = sorted(levels)\n if par_names is None:\n # extract all parameter names\n df, _ = histories[0].get_distribution(m=ms[0])\n par_names = list(df.columns.values)\n n_par = len(par_names)\n n_confidence = len(levels)\n if ts is None:\n ts = [history.max_t for history in histories]\n if refvals is not None and not isinstance(refvals, list):\n refvals = [refvals] * n_run\n\n # prepare axes\n fig, arr_ax = plt.subplots(\n nrows=n_par, ncols=1, sharex=False, sharey=False\n )\n if n_par == 1:\n arr_ax = [arr_ax]\n\n # prepare matrices\n cis = 
np.empty((n_par, n_run, 2 * n_confidence))\n median = np.empty((n_par, n_run))\n if show_mean:\n mean = np.empty((n_par, n_run))\n if show_kde_max:\n kde_max = np.empty((n_par, n_run))\n if show_kde_max_1d:\n kde_max_1d = np.empty((n_par, n_run))\n if kde is None and show_kde_max:\n kde = MultivariateNormalTransition()\n if kde_1d is None and show_kde_max_1d:\n kde_1d = MultivariateNormalTransition()\n\n # fill matrices\n # iterate over populations\n for i_run, (h, t, m) in enumerate(zip(histories, ts, ms)):\n df, w = h.get_distribution(m=m, t=t)\n # normalize weights to be sure\n w /= w.sum()\n # fit kde\n if show_kde_max:\n _kde_max_pnt = compute_kde_max(kde, df, w)\n # iterate over parameters\n for i_par, par in enumerate(par_names):\n # as numpy array\n vals = np.array(df[par])\n # median\n median[i_par, i_run] = compute_quantile(vals, w, 0.5)\n # mean\n if show_mean:\n mean[i_par, i_run] = np.sum(w * vals)\n # kde max\n if show_kde_max:\n kde_max[i_par, i_run] = _kde_max_pnt[par]\n if show_kde_max_1d:\n _kde_max_1d_pnt = compute_kde_max(kde_1d, df[[par]], w)\n kde_max_1d[i_par, i_run] = _kde_max_1d_pnt[par]\n # levels\n for i_c, confidence in enumerate(levels):\n lb, ub = compute_credible_interval(vals, w, confidence)\n cis[i_par, i_run, i_c] = lb\n cis[i_par, i_run, -1 - i_c] = ub\n\n # plot\n for i_par, (par, ax) in enumerate(zip(par_names, arr_ax)):\n for i_run in range(len(histories)):\n for i_c in reversed(range(len(levels))):\n y_err = np.array(\n [\n median[i_par, i_run] - cis[i_par, i_run, i_c],\n cis[i_par, i_run, -1 - i_c] - median[i_par, i_run],\n ]\n )\n y_err = y_err.reshape((2, 1))\n ax.errorbar(\n x=[i_run],\n y=median[i_par, i_run],\n yerr=y_err,\n capsize=(10.0 / n_confidence) * (i_c + 1),\n color=f'C{i_c}',\n )\n # reference value\n if refvals[i_run] is not None:\n ax.plot([i_run], [refvals[i_run][par]], 'x', color='black')\n ax.set_title(f\"Parameter {par}\")\n # mean\n if show_mean:\n ax.plot(range(n_run), mean[i_par], 'x', color=f'C{n_confidence}')\n # kde max\n if show_kde_max:\n ax.plot(\n range(n_run), kde_max[i_par], 'x', color=f'C{n_confidence + 1}'\n )\n if show_kde_max_1d:\n ax.plot(\n range(n_run),\n kde_max_1d[i_par],\n 'x',\n color=f'C{n_confidence + 2}',\n )\n ax.set_xticks(range(n_run))\n ax.set_xticklabels(labels, rotation=rotation)\n leg_colors = [f'C{i_c}' for i_c in reversed(range(n_confidence))]\n leg_labels = ['{:.2f}'.format(c) for c in reversed(levels)]\n if show_mean:\n leg_colors.append(f'C{n_confidence}')\n leg_labels.append(\"Mean\")\n if show_kde_max:\n leg_colors.append(f'C{n_confidence + 1}')\n leg_labels.append(\"Max KDE\")\n if show_kde_max_1d:\n leg_colors.append(f'C{n_confidence + 2}')\n leg_labels.append(\"Max KDE 1d\")\n if refvals is not None:\n leg_colors.append('black')\n leg_labels.append(\"Reference value\")\n handles = [\n Line2D([0], [0], color=c, label=l)\n for c, l in zip(leg_colors, leg_labels)\n ]\n ax.legend(handles=handles, bbox_to_anchor=(1.04, 1), loc=\"upper left\")\n\n # format\n arr_ax[-1].set_xlabel(\"Population t\")\n if size is not None:\n fig.set_size_inches(size)\n fig.tight_layout()\n\n return arr_ax", "def plotOfHeatingCurrent(self):\n\t\tfig,p1=_plt.subplots()\n\t\tp1.plot(self.time*1e3,self.heatingCurrent)\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def time_axis(self):\n if self.axes_wcs.wcs.ctype[0] not in ['TIME', 'UTC']:\n raise cu.CubeError(1, 'No time axis present')\n delta = 
self.axes_wcs.wcs.cdelt[0]\n crpix = self.axes_wcs.wcs.crpix[0]\n crval = self.axes_wcs.wcs.crval[0]\n start = crval - crpix * delta\n stop = start + len(self.data) * delta\n cunit = u.Unit(self.axes_wcs.wcs.cunit[0])\n return np.linspace(start, stop, num=self.data.shape[-1]) * cunit", "def plotTimeDepth(d,v):\n\n dpth,t = getTimeDepth(d,v)\n plt.figure(num=0, figsize = (6, 4))\n plt.plot(dpth,t,linewidth=2);\n plt.title('Depth-Time');\n plt.grid()\n plt.gca().set_xlabel('Depth (m)',fontsize=9)\n plt.gca().set_ylabel('Two Way Time (s)',fontsize=9)\n\n plt.tight_layout()\n plt.show()", "def plot(self,angle=17,**kwargs):\r\n \r\n h1=plt.plot(self.t,self.y,**kwargs)\r\n plt.xticks(rotation=angle)\r\n \r\n return h1", "def generate_time_series(num_points, num_dims):\n phase = np.random.randn()\n period = np.random.uniform()\n times = np.linspace(0, 10, num_dims)\n scale = np.random.exponential(size=num_points)\n return np.outer(scale, np.sin(times / period + phase))", "def tick(self, tick):\n pass", "def tick(self, tick):\n pass", "def plot_spectrum(wf, t=0, f_start=None, f_stop=None, logged=False, if_id=0, c=None, **kwargs):\n if wf.header['nbits'] <= 2:\n logged = False\n t = 'all'\n ax = plt.gca()\n\n plot_f, plot_data = wf.grab_data(f_start, f_stop, if_id)\n\n # Using accending frequency for all plots.\n if wf.header['foff'] < 0:\n plot_data = plot_data[..., ::-1] # Reverse data\n plot_f = plot_f[::-1]\n\n if isinstance(t, int):\n print(\"extracting integration %i...\" % t)\n plot_data = plot_data[t]\n elif t == 'all':\n print(\"averaging along time axis...\")\n # Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1\n if len(plot_data.shape) > 1:\n plot_data = plot_data.mean(axis=0)\n else:\n plot_data = plot_data.mean()\n else:\n raise RuntimeError(\"Unknown integration %s\" % t)\n\n # Rebin to max number of points\n dec_fac_x = 1\n if plot_data.shape[0] > MAX_PLT_POINTS:\n dec_fac_x = int(plot_data.shape[0] / MAX_PLT_POINTS)\n\n plot_data = rebin(plot_data, dec_fac_x, 1)\n plot_f = rebin(plot_f, dec_fac_x, 1)\n\n if not c:\n kwargs['c'] = '#333333'\n\n if logged:\n plt.plot(plot_f, db(plot_data), label='Stokes I', **kwargs)\n plt.ylabel(\"Power [dB]\")\n else:\n\n plt.plot(plot_f, plot_data, label='Stokes I', **kwargs)\n plt.ylabel(\"Power [counts]\")\n plt.xlabel(\"Frequency [MHz]\")\n plt.legend()\n\n try:\n plt.title(wf.header['source_name'])\n except KeyError:\n plt.title(wf.filename)\n\n plt.xlim(plot_f[0], plot_f[-1])", "def tgfs_plot():\n plt.rc('font', size=10) # control the default font size\n plt.rc('axes', titlesize=11) # fontsize of the axes title\n plt.rc('axes', labelsize=12) # fontsize of the axes x and y labels\n plt.rc('xtick', labelsize=10) # fontsize of the tick label\n plt.rc('ytick', labelsize=10) # fontsize of the tick label\n plt.rc('legend', fontsize=11) # legend fontsize\n\n # generate data\n x = np.arange(-10, 10.1, 0.1)\n y = x**3\n\n # plot figure\n plt.figure()\n plt.plot(x, y, label='$x^3$')\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.title('$y = x^3 for title, grid, & font size control \"plt.rc()\"$')\n plt.legend(loc='upper left')\n plt.grid()\n plt.show()\n\n return None", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = 
g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def tick(self):\r\n pass", "def plot(self, add_labels=False,\n _type=\"scatter\", color=None,\n layer=1, edgecolor='black'):\n _type = _type.lower()\n assert _type in (\"scatter\", \"line\")\n if _type == \"scatter\":\n scatter(\n self.times,\n self.temperatures,\n alpha=.8,\n zorder=layer,\n edgecolors=edgecolor\n )\n elif _type == \"line\":\n plt.plot(self.times,\n self.temperatures,\n \"-\", color=color,\n zorder=layer)\n if add_labels:\n self.set_plot_labels()", "def plot_u(times, u, t_deltas, is_own_event, figsize=(16, 6)):\n\n t_deltas = np.asarray(t_deltas)\n is_own_event = np.asarray(is_own_event)\n\n seq_len = np.nonzero(t_deltas == 0)[0][0] # First index where t_delta = 0\n abs_t = np.cumsum(t_deltas[:seq_len])\n abs_own = is_own_event[:seq_len]\n\n our_events = [t for (t, o) in zip(abs_t, abs_own) if o]\n other_events = [t for (t, o) in zip(abs_t, abs_own) if not o]\n\n u_max = np.max(u)\n\n plt.figure(figsize=(16, 6))\n\n c1, c2, c3 = sns.color_palette(n_colors=3)\n\n plt.plot(times, u, label='$u(t)$', color=c1)\n plt.vlines(our_events, 0, 0.75 * u_max, label='Us', alpha=0.5, color=c2)\n plt.vlines(other_events, 0, 0.75 * u_max, label='Others', alpha=0.5, color=c3)\n plt.xlabel('Time')\n plt.ylabel('$u(t)$')\n plt.legend()", "def get_timing_signal(length,\n min_timescale=1,\n max_timescale=1e4,\n num_timescales=16):\n positions = tf.to_float(tf.range(length))\n log_timescale_increment = (\n math.log(max_timescale / min_timescale) / (num_timescales - 1))\n inv_timescales = min_timescale * tf.exp(\n tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)\n scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)\n return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)" ]
[ "0.6426607", "0.62802076", "0.62569886", "0.62506014", "0.6236783", "0.6092056", "0.6068509", "0.60091037", "0.60037977", "0.60024494", "0.6001193", "0.59628236", "0.59590256", "0.5951325", "0.59470856", "0.59453624", "0.5932996", "0.5908867", "0.5891108", "0.5865043", "0.58623916", "0.58553123", "0.5847916", "0.5842294", "0.5838787", "0.5835769", "0.5827261", "0.5806125", "0.5788757", "0.5769874", "0.5757819", "0.57442766", "0.5732562", "0.5716214", "0.56921726", "0.56603086", "0.56530446", "0.5647014", "0.56410336", "0.5638046", "0.56163913", "0.56142664", "0.5597951", "0.5592093", "0.5589442", "0.5573702", "0.5527901", "0.55177313", "0.55170363", "0.5504068", "0.55018246", "0.54961497", "0.5489874", "0.5487542", "0.54855007", "0.5484473", "0.54785186", "0.54728293", "0.54647636", "0.54558146", "0.54287356", "0.5424739", "0.5416077", "0.54079515", "0.5405577", "0.5404089", "0.54000074", "0.53952485", "0.53602546", "0.5358707", "0.5353974", "0.5350642", "0.5349267", "0.5344882", "0.53390926", "0.5335862", "0.5333289", "0.5330468", "0.53300047", "0.53239506", "0.53235006", "0.5321521", "0.53205174", "0.53179973", "0.53117746", "0.530653", "0.5300981", "0.529513", "0.52894044", "0.52817804", "0.5276066", "0.5274625", "0.5274625", "0.5273512", "0.527326", "0.5270635", "0.5270284", "0.5268577", "0.52681667", "0.52672297" ]
0.68075925
0
Generates mapping from water measurements column names to indices of the given header.
def get_water_index_map(archive, header): column_re = { 'surface': { 'flow': 'pretok', 'level': 'vodostaj' }, 'ground': { 'altitude': 'nivo', 'level': 'vodostaj' } } column_map = {key: -1 for key in column_re[archive].keys()} empty = True # Do regex search of every db column for every CSV file column heading. for i, column in enumerate(header): for column_name in column_re[archive].keys(): if re.search(column_re[archive][column_name], column, re.IGNORECASE): if column_map[column_name] != -1: continue column_map[column_name] = i empty = False return None if empty else column_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping", "def column_indexer(data):\n idCol = {label: index for index, label in enumerate(data.columns)}\n return idCol", "def getColumnIndices(*args, filepath=\"CO2.tab\"):\n # idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"SEG\": 0}\n idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"VISG\": 0, \"VISHL\": 0, \"ROG\": 0, \"ROHL\": 0}\n if filepath:\n cols = tabLineToList(readFullLine(filepath, 52))\n for key in idxDict:\n idxDict[key] = cols.index(key)\n return idxDict", "def getIndividual2ColIndex(cls, header, col_name2index, sampleStartingColumn=9):\n\t\tsys.stderr.write(\"Finding all individuals ...\")\n\t\tno_of_cols = len(header)\n\t\tindividual_name2col_index = {}\t#individual's column name -> an opened file handler to store genetic data\n\t\tcounter = 0\n\t\tfor i in xrange(sampleStartingColumn, no_of_cols):\n\t\t\tindividualName = header[i]\n\t\t\tcol_index = col_name2index.get(individualName)\n\t\t\tif not individualName:\t#ignore empty column\n\t\t\t\tcontinue\n\t\t\tif individualName[:-4]=='.bam':\n\t\t\t\tindividualCode = individualName[:-4]\t#get rid of .bam\n\t\t\telse:\n\t\t\t\tindividualCode = individualName\n\t\t\tindividual_name2col_index[individualCode] = col_index\n\t\t\tcounter += 1\n\t\tsys.stderr.write(\"%s individuals added. Done.\\n\"%(counter))\n\t\treturn individual_name2col_index", "def find_indeces(self, header):\n indeces = {'T': None, 'WV': None, 'WK': None, 'BZ': None, 'SPR': None,\n 'WBER': None, 'ABG.': None, 'UNG.': None, 'SPOE': None,\n 'FPOE': None, 'OEVP': None, 'GRUE': None, 'NEOS': None,\n 'WWW': None, 'ANDAS': None, 'GFW': None, 'SLP': None,\n 'WIFF': None, 'M': None, 'FREIE': None}\n for index, item in enumerate(header):\n indeces[item] = index\n return indeces", "def _get_columns_mapping_dict():\n\n columns_mapping_dict = {}\n for original_header in COLUMN_HEADERS_MAPPER:\n new_header = COLUMN_HEADERS_MAPPER[original_header]\n columns_mapping_dict[new_header] = [original_header]\n return columns_mapping_dict", "def get_header_indices(filepath):\n\theaders = get_header_list(filepath, sort=False)\n\treturn {h: i for i, h in enumerate(headers)}", "def _get_header_index(self, columnname):\n\n return self.headers.index(columnname)", "def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping", "def create_matrix_mapping(train_mh, unk_vec_id):\n mh_index_map = {}\n matrix_idx = 0\n for vector_idx in train_mh:\n if vector_idx == unk_vec_id:\n unk_matrix_id = matrix_idx\n mh_index_map[vector_idx] = matrix_idx\n matrix_idx += 1\n return mh_index_map, unk_matrix_id", "def column_info(colum_header):\n commas = colum_header.count(',')\n if commas == 0:\n return (column_name(clean_token(colum_header)))\n\n (key, units, location) = colum_header.split(',')\n key = column_name(clean_token(key))\n units = clean_token(units)\n location = clean_token(location)\n return (key, units, location)", "def parseHeader(header):\n tokens = [t for t in header.split(' ') if t]\n result = {}\n for i in range(len(tokens)):\n result[tokens[i]] = i \n\n return result", "def _get_table_columns(self):\n try:\n 
table_header = parse_table_head(self.table.value, version=self.version)\n merged_data = self.table.value[table_header.tdef_header_end:]\n if table_header.TDEF_header.next_page_ptr:\n merged_data = merged_data + self._merge_table_data(table_header.TDEF_header.next_page_ptr)\n\n parsed_data = parse_table_data(merged_data, table_header.real_index_count,\n table_header.column_count, version=self.version)\n\n # Merge Data back to table_header\n table_header['column'] = parsed_data['column']\n table_header['column_names'] = parsed_data['column_names']\n\n except ConstructError:\n logging.error(f\"Failed to parse table header {self.table.value}\")\n return\n col_names = table_header.column_names\n columns = table_header.column\n\n # Add names to columns metadata so we can use only columns for parsing\n for i, c in enumerate(columns):\n c.col_name_str = col_names[i].col_name_str\n\n # column_index is more accurate(id is always incremented so it is wrong when a column is deleted).\n # Some tables like the catalog don't have index, so if indexes are 0 use id.\n\n # create a dict of index to column to make it easier to access. offset is used to make this zero based\n offset = min(x.column_index for x in columns)\n column_dict = {x.column_index - offset: x for x in columns}\n # If column index is not unique try best effort\n if len(column_dict) != len(columns):\n # create a dict of id to column to make it easier to access\n column_dict = {x.column_id: x for x in columns}\n\n if len(column_dict) != table_header.column_count:\n logging.debug(f\"expected {table_header.column_count} columns got {len(column_dict)}\")\n return column_dict, table_header", "def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None", "def header_population(headers):\n return [{'id': field, 'name': field, 'field': field, 'sortable': True} for field in headers]", "def get_report_column_names(self):\r\n # Compose the list of report_column names required for\r\n # summary_report.dsw.DictWriter()\r\n sr = self.summary_report\r\n dict_leader = sr.dict_leader\r\n dict_out = sr.dict_out\r\n column_names = self.column_names\r\n report_column_names = []\r\n #if dict_leader is not None and dict_out is not None:\r\n if dict_leader is not None and dict_out is not None:\r\n for key,value in dict_leader.iteritems():\r\n #print \"Adding report_column_name(from dict_leader)=\",key\r\n report_column_names.append(key)\r\n dict_out[key] = value\r\n # We have to initialize the DictWriter with the report_column_names\r\n # below. \r\n # Also need matched coord_val and var names for calling node_report()\r\n # below, so we do this duplication of storage of names. 
\r\n coord_var_names = []\r\n coord_val_names = []\r\n for idx, column_name in enumerate(column_names):\r\n var_name = \"Var_%s\" % str(idx+1)\r\n report_column_names.append(var_name)\r\n coord_var_names.append(var_name)\r\n val_name = \"Val_%s\" % str(idx+1)\r\n report_column_names.append(val_name)\r\n coord_val_names.append(val_name)\r\n # Add the entry report_column_names\r\n report_column_names += self.EntryClass.report_column_names\r\n return report_column_names", "def parse_header(f):\n columns = ['pokemon', 'species_id', 'height', 'weight', 'type_1', 'type_2',\n 'url_image', 'generation_id', 'evolves_from_species_id']\n sep = ','\n result = {}\n allData = []\n with open(const.DATA_FILENAME, newline=\"\") as myData:\n for line in myData:\n line = line.strip()\n line = line.split(sep)\n allData.append(line)\n for i in columns:\n j = 0\n while j < len(allData[0]):\n if allData[0][j] == i:\n result[i] = j\n j += 1\n return result", "def set_column_headers(self, headers):\n if isinstance(self.columns.idx[0], int):\n self.data = [sorted(headers)] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment\n\n elif isinstance(self.columns.idx[0], str):\n datum = {}\n for i, key in enumerate(self.columns.idx):\n datum.update({key: headers[i]})\n self.data = [datum] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment", "def get_interesting_mapping_fields(mapping_data, mapping_headers):\r\n result = []\r\n num_samples = len(mapping_data)\r\n num_cols = len(mapping_headers)\r\n transposed_data = array(mapping_data).T\r\n for header, datum in zip(mapping_headers, transposed_data):\r\n d = set(datum)\r\n len_d = len(d)\r\n if len_d > 1 and len_d < num_samples:\r\n result.append(header)\r\n return result", "def header(self) -> list:\n cols = self.data.columns.tolist()\n header = [\"index\"]\n for col_int in cols:\n header.append(col_int)\n return header", "def map_column_indexes(self, merge_specification, ingredients):\n last_column = len(ingredients) - 1\n accumulating = {}\n remove = set()\n # default behavior, no column merge\n for column_index in range(0, last_column + 1):\n self.column_index_to_columns[column_index] \\\n = [(column_index, 1.0)]\n \n for columns in merge_specification:\n accumulating_column = columns[0][0]\n if accumulating_column > last_column or accumulating_column < 0:\n raise MergeConfigError(\n \"Attempted to merge missing column %d\" % accumulating_column)\n # specifies which columns should be merged into this one\n accumulating[accumulating_column] = columns\n for column_index, _ in columns[1:]:\n column_index = column_index\n if column_index > last_column or column_index < 0:\n raise MergeConfigError(\n \"Attempted to merge missing column %d\" % column_index) \n # drop this column; it will be merged into another\n remove.add(column_index)\n \n # drop columns first so that any columns both specified as\n # accumulating *and* merged columns do not get dropped\n for column_index in remove:\n self.column_index_to_columns[column_index] = None\n \n for column_index, columns in accumulating.items():\n self.column_index_to_columns[column_index] = columns", "def get_columns_dict(table, replace):\n # 0 is name, 1 is id\n if type(table.index) == pd.MultiIndex:\n colcount = 1 + len(table.index[0])\n else:\n colcount = 2\n cols = {}\n for c in table.columns:\n c_repres = \",\".join(c)\n if \"Filtergroups\" not in c:\n cols[colcount] = replace_in_str(str(c_repres), replace)\n colcount = colcount + 1\n return 
cols", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices", "def get_sip_keywords(header):\n cd = np.matrix([[header.get('CD1_1', 0.0), header.get('CD1_2', 0.0)],\n [header.get('CD2_1', 0.0), header.get('CD2_2', 0.0)]], dtype=np.float64)\n a_order = int(header.get('A_ORDER', 0))\n b_order = int(header.get('B_ORDER', 0))\n ac = np.matrix(np.zeros((a_order+1, a_order+1), dtype=np.float64))\n bc = np.matrix(np.zeros((b_order+1, b_order+1), dtype=np.float64))\n for m in range(a_order+1):\n for n in range(0, a_order+1-m):\n ac[m, n] = header.get('A_%d_%d' % (m, n), 0.0)\n for m in range(b_order+1):\n for n in range(0, b_order+1-m):\n bc[m, n] = header.get('B_%d_%d' % (m, n), 0.0)\n return cd, ac, bc", "def header(self, cols, parent_row):\n out = []\n for col in cols:\n if col == 'gau_id':\n out.append(self.name_for('Geographies', parent_row['geography_id']))\n elif col == 'oth_1_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_1_id']))\n elif col == 'oth_2_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_2_id']))\n else:\n out.append(col)\n return out", "def get_tbl_headers(rows):\n tbl_header = rows.pop(0)\n tbl_headers = {}\n for index, header_name in enumerate(tbl_header.find_all('th')):\n if header_name.text in conf.TABLE_HEADER_COLS:\n tbl_headers[header_name.text] = index\n return tbl_headers", "def getIndex(self,filt):\n indx = [i for i in xrange(len(self._header)) if filt == self._header[i]]\n return indx", "def 
vector_indx_to_map_matrix_indx(index,senzory_map):\n xs = dict(zip(np.unique(senzory_map[:,0]), it.count()))\n ys = dict(zip(np.negative(np.unique(senzory_map[:,1])), it.count()))\n x, y = senzory_map[index]\n return ys[y],xs[x]", "def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fields:\r\n create_key_index(column.db_field_name)", "def get_mention_id_mappings(input_file_name, out_file, field_threshold, column_indices):\n out = open(out_file, 'w')\n if input_file_name.endswith('.csv'):\n with open(input_file_name, 'r') as f:\n reader = csv.reader(f)\n for line in reader:\n if len(line) >= field_threshold:\n if not line[0].startswith(\"#\"):\n row = []\n for index in column_indices:\n row.append(line[index])\n out.write(\"|\".join(row) + \"\\n\")\n out.close()", "def headers_processor(headers):\n def apply_headers(row_set, row):\n _row = []\n pairs = izip_longest(row, headers)\n for i, (cell, header) in enumerate(pairs):\n if cell is None:\n cell = Cell(None)\n cell.column = header\n if not cell.column:\n cell.column = \"column_%d\" % i\n cell.column_autogenerated = True\n _row.append(cell)\n return _row\n return apply_headers", "def writeTotalIndex(map_data):\n ids = map_data['id']\n index = [0.0] * len(ids)\n\n colnames = ['cars', 'bikes', 'ages', 'parking', 'male_singles',\n 'female_singles', 'digging', 'freeparking']\n weights = [-0.5, 0.5, 0.1, -0.5, 1.0, 1.0, -1.0, 0.25]\n for colname, weight in zip(colnames, weights):\n values = map_data[colname]\n\n index += values * (weight / values.max())\n\n toJson('final', pd.DataFrame({'id': ids, 'counts': index}))", "def get_column_dict(self) -> HeaderToWells:\n return self._grid.columns", "def add_columns(self, npy_arr, tile):\n size = len(npy_arr)\n\n tile_name = Tile.ZONES[tile.name[0].lower()] + tile.name[1:]\n\n # create ids\n def get_id(order):\n order = str(order).rjust(10, \"0\")\n return int(tile_name + order)\n ids = np.fromiter(\n (get_id(idx + 1) for idx in range(size)), dtype=np.int64)\n\n # calculate the hjds\n mjd_h, mjd_j, mjd_k = tile.epochs\n\n gen_h = (pyasl.helio_jd(mjd_h, ra, dec)\n for ra, dec in zip(npy_arr[\"ra_h\"], npy_arr[\"dec_h\"]))\n hjd_h = np.fromiter(gen_h, dtype=float)\n\n gen_j = (pyasl.helio_jd(mjd_j, ra, dec)\n for ra, dec in zip(npy_arr[\"ra_j\"], npy_arr[\"dec_j\"]))\n hjd_j = np.fromiter(gen_j, dtype=float)\n\n gen_k = (pyasl.helio_jd(mjd_k, ra, dec)\n for ra, dec in zip(npy_arr[\"ra_k\"], npy_arr[\"dec_k\"]))\n hjd_k = np.fromiter(gen_k, dtype=float)\n\n columns = [\n (\"id\", ids),\n (\"hjd_h\", hjd_h),\n (\"hjd_j\", hjd_j),\n (\"hjd_k\", hjd_k)]\n return add_columns(npy_arr, columns)", "def get_pv_keywords(header):\n cd = np.matrix([[header.get('CD1_1', 0.0), header.get('CD1_2', 0.0)],\n [header.get('CD2_1', 0.0), header.get('CD2_2', 0.0)]], dtype=np.float64)\n pv1 = np.zeros((40,), dtype=np.float64)\n pv2 = np.zeros((40,), dtype=np.float64)\n for k in range(40):\n pv1[k] = header.get('PV1_%d' % k, 0.0)\n pv2[k] = header.get('PV2_%d' % k, 0.0)\n return cd, pv1, pv2", "def MakeDictMatrix(D, header, lev=72):\n Ms = np.zeros((1, lev, len(header)))\n Mp = np.ones((1, lev, len(header)))\n for var in D.dict.keys():\n i = header.index(var)\n Ms[0, :, i] = D[var].sub_vec(lev)\n Mp[0, :, i] = D[var].prod_vec(lev)\n return(Ms,Mp)", "def map_column_to_index(self, col):\n if col in self.column_maps:\n return\n\n # First construct the map from 
original ids to new ones.\n ids = pd.concat((self.train[col], self.test[col])).unique()\n n = len(ids)\n idmap = dict(itertools.izip(ids, xrange(n)))\n\n # Next use the map to convert the ids in-place.\n self.train.loc[:, col] = self.train[col].apply(lambda _id: idmap[_id])\n self.test.loc[:, col] = self.test[col].apply(lambda _id: idmap[_id])\n\n # Now swap key for value in the idmap to provide a way to convert back.\n reverse_map = {val: key for key, val in idmap.iteritems()}\n self.column_maps[col] = reverse_map", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def create_index(log_df, column):\n temp_list = log_df[[column]].values.tolist()\n subsec_set = {(x[0]) for x in temp_list}\n subsec_set = sorted(list(subsec_set))\n alias = dict()\n for i, _ in enumerate(subsec_set):\n alias[subsec_set[i]] = i + 1\n return alias", "def get_column_indices(indices, inputs, multiple):\n if multiple:\n res = OrderedDict()\n for p in indices:\n ov, onnx_i = get_column_index(p, inputs)\n if ov not in res:\n res[ov] = []\n res[ov].append(onnx_i)\n return res\n\n onnx_var = None\n onnx_is = []\n for p in indices:\n ov, onnx_i = get_column_index(p, inputs)\n onnx_is.append(onnx_i)\n if onnx_var is None:\n onnx_var = ov\n elif onnx_var != ov:\n cols = [onnx_var, ov]\n raise NotImplementedError( # pragma: no cover\n \"sklearn-onnx is not able to merge multiple columns from \"\n \"multiple variables ({0}). 
You should think about merging \"\n \"initial types.\".format(cols))\n return onnx_var, onnx_is", "def _index_from_key(self, key):\n\t\t\n\t\treturn self.columns.index(str.upper(key[0])), self.rows.index(key[1])", "def _create_img_id_to_idx(self):\n with h5py.File(self.image_features_path, 'r') as features_file:\n coco_ids = features_file['ids'][()]\n coco_id_to_index = {id: i for i, id in enumerate(coco_ids)}\n return coco_id_to_index", "def determine_coordinates_and_cell_names(self):\n self.coordinates_and_cell_headers = [\n annot[0]\n for annot in self.file.columns\n if annot[0].lower() in (\"z\", \"y\", \"x\", \"name\")\n ]\n # annotation column names\n self.annot_column_headers = [\n annot\n for annot in self.file.columns\n if annot[0].lower() not in (\"z\", \"y\", \"x\", \"name\")\n ]", "def build_multi_index_tuples(header: list, sub_header: list) -> list:\r\n\r\n tuples = []\r\n\r\n for head in header:\r\n for sub_head in sub_header:\r\n tuples.append((head, sub_head))\r\n\r\n return tuples", "def create_deft_table_csv_mappings():\n mappings = list()\n mappings.append(CsvColumnMapping(columnName=\"rownumber\", cslDataType=\"int\", ordinal=0))\n mappings.append(CsvColumnMapping(columnName=\"rowguid\", cslDataType=\"string\", ordinal=1))\n mappings.append(CsvColumnMapping(columnName=\"xdouble\", cslDataType=\"real\", ordinal=2))\n mappings.append(CsvColumnMapping(columnName=\"xfloat\", cslDataType=\"real\", ordinal=3))\n mappings.append(CsvColumnMapping(columnName=\"xbool\", cslDataType=\"bool\", ordinal=4))\n mappings.append(CsvColumnMapping(columnName=\"xint16\", cslDataType=\"int\", ordinal=5))\n mappings.append(CsvColumnMapping(columnName=\"xint32\", cslDataType=\"int\", ordinal=6))\n mappings.append(CsvColumnMapping(columnName=\"xint64\", cslDataType=\"long\", ordinal=7))\n mappings.append(CsvColumnMapping(columnName=\"xuint8\", cslDataType=\"long\", ordinal=8))\n mappings.append(CsvColumnMapping(columnName=\"xuint16\", cslDataType=\"long\", ordinal=9))\n mappings.append(CsvColumnMapping(columnName=\"xuint32\", cslDataType=\"long\", ordinal=10))\n mappings.append(CsvColumnMapping(columnName=\"xuint64\", cslDataType=\"long\", ordinal=11))\n mappings.append(CsvColumnMapping(columnName=\"xdate\", cslDataType=\"datetime\", ordinal=12))\n mappings.append(CsvColumnMapping(columnName=\"xsmalltext\", cslDataType=\"string\", ordinal=13))\n mappings.append(CsvColumnMapping(columnName=\"xtext\", cslDataType=\"string\", ordinal=14))\n mappings.append(CsvColumnMapping(columnName=\"xnumberAsText\", cslDataType=\"string\", ordinal=15))\n mappings.append(CsvColumnMapping(columnName=\"xtime\", cslDataType=\"timespan\", ordinal=16))\n mappings.append(CsvColumnMapping(columnName=\"xtextWithNulls\", cslDataType=\"string\", ordinal=17))\n mappings.append(CsvColumnMapping(columnName=\"xdynamicWithNulls\", cslDataType=\"dynamic\", ordinal=18))\n return mappings", "def create_columns(headers: List, annot_types: List):\n new_column_names: List[Tuple] = []\n for annotation, annot_type in zip(headers, annot_types):\n new_column_names.append((annotation, annot_type))\n return new_column_names", "def column_name(name):\n # Only needs exceptions to standard token cleanup\n column_map = {\n \"line#\" : \"ignore\",\n \"date\" : \"timestamp\",\n \"rh\" : \"humidity\",\n \"par\" : \"par_ue\"\n }\n\n if name in column_map:\n return column_map[name]\n \n return name", "def _tables_from_headers(headers):\n return {table for field, table in _DATA_TABLES_BY_UNIQUE_FIELD.items() if field in headers}", "def 
convert_header_to_multi_index(df, header_names: List[Tuple]):\n index = pd.MultiIndex.from_tuples(header_names, names=[\"NAME\", \"TYPE\"])\n df.columns = pd.MultiIndex.from_tuples(index)\n return df", "def minimize_header(self, header):\n header = self.locate.fits_to_parkeys(header) # reference vocab --> dataset vocab\n if isinstance(self, PipelineContext):\n instrument = self.get_instrument(header)\n mapping = self.get_imap(instrument)\n keys = mapping.get_required_parkeys() + [self.instrument_key]\n else:\n keys = self.get_required_parkeys()\n minimized = {}\n for key in keys:\n minimized[key] = header.get(key.lower(),\n header.get(key.upper(), \"UNDEFINED\"))\n return minimized", "def get_column_reference(headers, name):\n return chr(ord('A') + headers.index(name))", "def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reindex_dict.values())+1\n reindex_dict[m] = new_m\n reindexed_marks.append(new_m)\n return tuple( [self.component1.genus] + sorted(reindexed_marks) )", "def _build_header_dictionary(self):\n start = 0\n #print self.raw_data\n for a in range(20):\n redatapuller = re.compile(\"\\r\\n\\r\\n\\r\\n(?P<word>.*?)\\t.*?\\n\", re.DOTALL)\n m = redatapuller.search(self.raw_data[start:])\n if not(m):\n break\n self.header_dictionary[m.group(\"word\")] = start + m.end()\n if a==0:\n self.header_dictionary[\"main\"] = start + m.end()\n start += m.end()", "def generate_colnames(df, labelnum=0): # need to be adjusted for GC content\n colnames = []\n for field in range(len(df.columns) - labelnum):\n colnames.append(BEDCOLS[field])\n for label in range(labelnum):\n colnames.append(f\"label_{label+1}\")\n return colnames", "def _bin_hits_to_unit_indices(rows, cols, target_breaks, source_breaks,\n su_start):\n # keep track of mapping between matrix row index and target unit index\n # in ``target_units``\n row2t_unit_ind = np.array([\n u_ind\n for u_ind in range(len(target_breaks) - 1)\n for _ in range(target_breaks[u_ind+1] - target_breaks[u_ind])])\n # keep track of mapping between matrix column index and source unit index\n # in ``source_units``\n col2s_unit_ind = np.array([\n u_ind\n for u_ind in range(len(source_breaks) - 1)\n for _ in range(source_breaks[u_ind+1] - source_breaks[u_ind])])\n tmp = {}\n hits2positions = {}\n t_inds = row2t_unit_ind[rows]\n s_inds = col2s_unit_ind[cols]\n t_poses = rows - target_breaks[t_inds]\n s_poses = cols - source_breaks[s_inds]\n # although s_inds needs to index the source_breaks by the ordering of this\n # batch of source_units, s_inds needs to account for source_unit indices as\n # referenced from outside of this batch\n s_inds += su_start\n for t_ind, s_ind, t_pos, s_pos in zip(t_inds, s_inds, t_poses, s_poses):\n key = (t_ind, s_ind)\n if key not in tmp:\n tmp[key] = (t_pos, s_pos)\n elif key not in hits2positions:\n hits2positions[key] = [tmp[key], (t_pos, s_pos)]\n else:\n hits2positions[key].append((t_pos, s_pos))\n hits2positions = {k: np.array(v) for k, v in hits2positions.items()}\n return hits2positions", "def get_cols_dummy():", "def _get_idx_maps(self, types, initial_mapping=None):\n initial_mapping = constants.INITIAL_MAPPING if initial_mapping is None else initial_mapping\n # generate type to index mappings\n self.type_to_idx['word'] = Preprocessor.type_to_idx(types['word'], initial_mapping['word'])\n self.type_to_idx['char'] = Preprocessor.type_to_idx(types['char'], 
initial_mapping['word'])\n self.type_to_idx['tag'] = Preprocessor.type_to_idx(types['tag'], initial_mapping['tag'])", "def filter_mapping_file(map_data, map_header, good_sample_ids,\r\n include_repeat_cols=False, column_rename_ids=None):\r\n # keeping samples\r\n to_keep = []\r\n to_keep.extend([i for i in map_data if i[0] in good_sample_ids])\r\n\r\n # keeping columns\r\n headers = []\r\n to_keep = zip(*to_keep)\r\n headers.append(map_header[0])\r\n result = [to_keep[0]]\r\n\r\n if column_rename_ids:\r\n # reduce in 1 as we are not using the first colum (SampleID)\r\n column_rename_ids = column_rename_ids - 1\r\n for i, l in enumerate(to_keep[1:-1]):\r\n if i == column_rename_ids:\r\n if len(set(l)) != len(result[0]):\r\n raise ValueError(\r\n \"The column to rename the samples is not unique.\")\r\n result.append(result[0])\r\n result[0] = l\r\n headers.append('SampleID_was_' + map_header[i + 1])\r\n elif include_repeat_cols or len(set(l)) > 1:\r\n headers.append(map_header[i + 1])\r\n result.append(l)\r\n else:\r\n for i, l in enumerate(to_keep[1:-1]):\r\n if include_repeat_cols or len(set(l)) > 1:\r\n headers.append(map_header[i + 1])\r\n result.append(l)\r\n headers.append(map_header[-1])\r\n result.append(to_keep[-1])\r\n\r\n result = map(list, zip(*result))\r\n\r\n return headers, result", "def convert_field_name_to_indexes(field_name):\n rows = '87654321'\n columns = 'abcdefgh'\n row_index = column_index = None\n\n row_name = field_name[0]\n for i in range(8):\n if rows[i] == row_name:\n row_index = i\n\n column_name = field_name[1]\n for i in range(8):\n if columns[i] == column_name:\n column_index = i\n\n return row_index, column_index", "def index(self):\n path = self.path.format('index')\n \n with open(path, 'r', newline='') as file:\n l = list(csv.reader(file))\n \n index = [v for _ in l for v in _]\n index = dict((v, i) for (i, v) in enumerate(index))\n \n return index", "def build_index(text: Iterable) -> Dict[str, List[Tuple[int, int]]]:\n index = defaultdict(list)\n for line_no, line in enumerate(text, 1):\n for match in WORD_RE.finditer(line):\n word = match.group()\n column_no = match.start() + 1\n location = (line_no, column_no)\n index[word].append(location)\n return index", "def __generate_features_index__(self, feature_names, dictionaries):\n keys = []\n for name, dictionary in zip(feature_names, dictionaries):\n features = []\n for feature in dictionary.keys():\n if dictionary.get(feature) > self._cutoff:\n features.append((name, feature))\n self.feature_freq[name] += 1\n keys.extend(features)\n for i in range(len(keys)):\n self._features_index[keys[i]] = i\n self.features_list = tuple(keys)\n self._features_vector_length = len(keys)", "def index_by(self, column_or_label):\n column = self._get_column(column_or_label)\n index = {}\n for key, row in zip(column, self.rows):\n index.setdefault(key, []).append(row)\n return index", "def wind_profile_indices_map(self):\n if self._hybrid_meta is None:\n return np.array([]), np.array([])\n\n idxs = self._hybrid_meta[self.__wind_rpi_n].astype(int)\n idxs = idxs[idxs >= 0]\n\n return idxs.index.values, idxs.values", "def column_index(input_file, name):\n col, com = find_columns(input_file)\n col_name = name\n contents = open(input_file, 'r').readlines()\n for line in contents:\n if com[col.index(col_name)] in line:\n line_index = contents.index(line)+1\n return line_index", "def generate_bitmap_to_linear_index_map(bladeTupList, firstIdx):\n bitmap_map = np.zeros(len(bladeTupList), dtype=int)\n for ind, blade in 
enumerate(bladeTupList):\n bitmap_map[compute_bitmap_representation(blade, firstIdx)] = ind\n return bitmap_map", "def map_column_to_index(self, col):\n if col in self.column_maps:\n return\n\n # First construct the map from original ids to new ones.\n ids = self.dataset[col].unique()\n n = len(ids)\n idmap = dict(itertools.izip(ids, xrange(n)))\n\n # Next use the map to conver the ids in-place.\n self.dataset[col] = self.dataset[col].apply(lambda _id: idmap[_id])\n\n # Now swap key for value in the idmap to provide a way to convert back.\n reverse_map = {val: key for key, val in idmap.iteritems()}\n self.column_maps[col] = reverse_map", "def index_value(self):\r\n\t\tfor index, column_header in enumerate(self.header_row):\r\n\t\t\tprint(index, column_header)", "def label_columns(mapping):\n columns = []\n for name, column in mapping.items():\n columns.append(column.label(name))\n return columns", "def pivot_rows_to_keys(header_list, row_list, key_names=[], whitelist_headers=None):\n header_map = {v: header_list.index(v) for v in\n [x for x in header_list if not whitelist_headers or x in whitelist_headers or x in key_names]}\n\n key_idxs = []\n for key_name in key_names:\n key_idxs.append(header_map[key_name])\n\n #key_idx = header_map[key_name]\n #return {\"{}{}\".format(x[key_idx],x[keya_idx]): {k: x[v] for k, v in list(header_map.items())} for x in row_list}\n\n return {\":\".join(itemgetter(*key_idxs)(x)): {k: x[v] for k, v in list(header_map.items())} for x in row_list}", "def writeheader(writer):\n writer.writerow(dict((fn, fn) for fn in writer.fieldnames))", "def __valuesToIndices(self, mappings, values):\n indices = np.empty(0, dtype=np.int_)\n\n for key, _ in mappings.items():\n # Lookup the index of the value of the values in the map.\n index = mappings[key](values[key])\n\n indices = np.hstack((indices, index))\n\n return indices", "def nomenclatura():\n df = pd.read_csv(\"Data/nomenclatura_1.csv\", encoding = \"latin1\")\n #dict_axis = df.set_index('id').T.to_dict('list')\n dict_axis = dict( [ (i, [a,b]) for i, a,b in zip(df.id, df.latitude, df.longitude) ] )\n\n return dict_axis", "def parse_header(header):\n if header[0] != '@':\n return None\n \n instrument, run_number, flowcell_id, lane, tile, x_pos, y_pos_read, is_filtered, control_number, index = header[1:].split(\":\")\n y_pos, read = y_pos_read.split()\n return {'instrument': str(instrument),\n 'run_number': int(run_number),\n 'flowcell_id': str(flowcell_id),\n 'lane': int(lane),\n 'tile': int(tile),\n 'x_pos': int(x_pos),\n 'y_pos': int(y_pos),\n 'read': int(read),\n 'is_filtered': (is_filtered == 'Y'),\n 'control_number': int(control_number),\n 'index': str(index)} # Note that MiSeq Reporter outputs a SampleSheet index rather than the index sequence", "def make_headers(worksheet):\n headers = {}\n cell_idx = 0\n while cell_idx < worksheet.ncols:\n cell_type = worksheet.cell_type(0, cell_idx)\n if cell_type == 1:\n header = slughifi(worksheet.cell_value(0, cell_idx))\n if not header.startswith(\"_\"):\n headers[cell_idx] = header\n cell_idx += 1\n return headers", "def build_data_dict(stats_arr, prefix):\n # Get no. 
of points in each dimension.\n N_sam, N_thres, N_z, N_kbins = stats_arr.shape\n\n # Create dictionary with column names that can be used by ColumnDataSource\n data_dict = {}\n for n in range(N_thres):\n for j in range(N_z):\n for m in range(N_kbins):\n key = \"tot_%s_h%d_k%d_z%d\" % (prefix, n+1, m+1, j+1)\n data_dict[key] = stats_arr[:,n, j, m]\n return data_dict", "def _infer_ipa_col(self, header):\n\t\tif self.ipa_col and isinstance(self.ipa_col, str):\n\t\t\tif self.ipa_col in header:\n\t\t\t\treturn header.index(self.ipa_col)\n\n\t\t\ttry:\n\t\t\t\tipa_col = int(self.ipa_col)\n\t\t\texcept ValueError: pass\n\t\t\telse:\n\t\t\t\treturn ipa_col\n\n\t\t\traise ValueError('Could not find column: {}'.format(self.ipa_col))\n\n\t\tpot = []\n\n\t\tfor index, col_name in enumerate(header):\n\t\t\tcol_name = col_name.lower()\n\t\t\tfor name in IPA_COL_NAMES:\n\t\t\t\tif col_name.startswith(name):\n\t\t\t\t\tpot.append(index)\n\n\t\tif len(pot) == 0:\n\t\t\traise ValueError('Could not find an IPA column')\n\t\telif len(pot) > 1:\n\t\t\traise ValueError('Could not decide which is the IPA column')\n\n\t\treturn pot[0]", "def reference_keys_to_dataset_keys(rmapping, header):\n header = dict(header)\n try:\n translations = rmapping.reference_to_dataset\n except AttributeError:\n pass\n else:\n # Add replacements for translations *if* the existing untranslated value\n # is poor and the translated value is better defined. This is to do\n # translations w/o replacing valid/concrete DM values with something \n # like guessed values of \"UNDEFINED\" or \"N/A\".\n for rkey in translations:\n if rkey in header:\n dkey = translations[rkey]\n dval = header.get(translations[rkey], None)\n rval = header[rkey]\n if dval in [None, \"N/A\", \"UNDEFINED\"] and rval not in [None, \"UNDEFINED\"]:\n header[dkey] = rval\n if \"USEAFTER\" in header: # and \"DATE-OBS\" not in header:\n reformatted = timestamp.reformat_useafter(rmapping, header).split()\n header[\"DATE-OBS\"] = header[\"META.OBSERVATION.DATE\"] = reformatted[0]\n header[\"TIME-OBS\"] = header[\"META.OBSERVATION.TIME\"] = reformatted[1]\n return header", "def get_sample_idx(sample, header):\n\n for item in header:\n if sample in item:\n return header.index(item)\n\n print(sample + \" not found in header, check input files.\")\n sys.exit()", "def header_data_columns(head_line, data_cols, header):\n\n colnames = head_line.split(\",\")\n\n # Remove triling blancks and end of lines\n colnames = [x.strip() for x in colnames]\n\n # Difference between columns in the header and in the data\n diff = len(data_cols) - len(colnames)\n\n if diff > 0:\n # Add dum headers\n dums = \"\"\n for idiff in range(diff):\n dums = dums + \",dum\" + str(idiff)\n\n new_head = str(head_line.rstrip()) + dums + \" \\n\"\n header.append(new_head)\n\n elif diff < 0:\n sys.exit(\n \"STOP novonix_clean.header_data_columns \\n\"\n + \"REASON less data columns than header names \\n\"\n )\n else:\n header.append(head_line)\n\n return", "def format_header(line):\n cols = []\n for n, item in enumerate(line.replace('#', '').strip().split('\\t')):\n if item == 'id/name':\n cols.append('name')\n elif item == 'brass_score':\n cols.append('score')\n elif item.startswith('strand') and n > 9:\n if item == 'strand1':\n cols.append('transcript1_strand')\n elif item == 'strand2':\n cols.append('transcript2_strand')\n else:\n raise ValueError(\"Unknown strand value column {0} - {1}\".format(n, item))\n else:\n cols.append(item.lower().replace(' ', '_').replace('/', '_').replace('-', '_'))\n return 
cols", "def writeheader(fh,colnames):\n for i in range(len(colnames)):\n fh.write('# %d %s\\n'%(i+1,colnames[i]))", "def fits_to_parkeys(fits_header):\n if MODEL is None:\n raise MissingDependencyError(\"JWST data models are not installed. Cannot fits_to_parkeys().\")\n parkeys = {}\n for key, value in fits_header.items():\n key, value = str(key), str(value)\n if not key.lower().startswith(\"meta.\"):\n pk = cached_dm_find_fits_keyword(key)\n if not pk:\n pk = key\n else:\n assert len(pk) == 1, \"CRDS JWST Data Model ambiguity on \" + \\\n repr(key) + \" = \" + repr(pk)\n pk = pk[0]\n else:\n pk = key\n pk = pk.upper()\n parkeys[pk] = value\n return parkeys", "def header(self):\n\n data = {}\n data['latitude'] = self.latitude()\n data['latitude_unc'] = self.latitude_unc()\n data['longitude'] = self.longitude()\n data['longitude_unc'] = self.longitude_unc()\n data['uid'] = self.uid()\n data['n_levels'] = self.n_levels()\n data['year'] = self.year()\n data['month'] = self.month()\n data['day'] = self.day()\n data['time'] = self.time()\n data['cruise'] = self.cruise()\n data['probe_type'] = self.probe_type()\n \n header = pd.Series(data)\n\n return header", "def __get_column_names(self, path_to_xml):\n span_table = xmlet.parse(path_to_xml / Path(\"SSTDataFormatTimeSpanTable.xml\")).getroot()\n filetype = MAP_RBD_TYPE_TO_FILE_TYPE[self.__rbd_type]\n\n for item in span_table:\n if item[0].text == filetype and item[1].text <= self.date <= item[2].text:\n data_description_file_name = item[3].text\n\n xml = xmlet.parse(path_to_xml / Path(data_description_file_name)).getroot()\n\n header = dict()\n for child in xml:\n var_name = child[0].text\n var_dim = int(child[1].text)\n var_type = child[2].text\n var_unit = child[3].text\n\n np_type = XML_TYPE_TO_NUMPY_TYPE[var_type]\n\n header.update({var_name: [var_dim, np_type, var_unit]})\n\n return header", "def _column_sorting_key(self, c):\n first_index = 0\n if c.startswith('hybrid'):\n first_index = 1\n elif c.startswith('solar'):\n first_index = 2\n elif c.startswith('wind'):\n first_index = 3\n elif c == MERGE_COLUMN:\n first_index = -1\n return first_index, self._hybrid_meta.columns.get_loc(c)", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def make_dof_value_map(robot):\n names = [j.GetName() for j in robot.GetJoints()]\n indices = [j.GetDOFIndex() for j in robot.GetJoints()]\n\n def get_dofs():\n pose={}\n values=robot.GetDOFValues()\n for (i,n) in zip(indices,names):\n pose.setdefault(n,values[i])\n return pose\n\n return get_dofs", "def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}", "def data_population(in_data, headers=None):\n headers = headers or in_data[0]\n\n return [\n dict([(header, row[cindex])\n for cindex, header in enumerate(headers)])\n for row in in_data\n ]", "def get_horiz_structure(col_names, num_rows):\n struct = [{name: random.randint(0, max_cell_value) for name in col_names} for _ in range(num_rows)]\n return struct", "def index_prob_id(blocks):\n return blocks.set_index(\n pd.Index(\n blocks.index.str.extract(r\"HOAF_(\\d\\d)\\.vhdr\").astype(int)[0],\n name=\"prob_id\",\n )\n )", "def get_indices(self):\n\n def query(rel): \n return \"\"\"SELECT pg_class.relname, pg_index.indkey\n FROM 
pg_class, pg_index\n WHERE (pg_index.indexrelid = pg_class.oid)\n AND (pg_index.indrelid = (SELECT pg_class.oid FROM pg_class WHERE pg_class.relname = \\'{}\\'));\n \"\"\".format(rel)\n\n rels = tpch.schema.keys()\n idxs = dict.fromkeys(rels)\n\n with self.tpch_cxn.cursor() as curs:\n for rel in rels:\n curs.execute(query(rel))\n idxs_ = curs.fetchall()\n idxs_ = dict(idxs_) # index -> index keys \n \n # TODO this can be done cleanly in query\n # pg_index.indkey is a SQL array of attributes indices in their respective tables\n split=lambda attrs: attrs.split() \n cast=lambda attrs: list(map(lambda attr: int(attr)-1, attrs))\n invertindex=lambda attrs: list(np.array(schema[rel])[attrs])\n\n attrs = idxs_.values() \n attrs = list(map(split, attrs))\n attrs = list(map(cast, attrs))\n attrs = list(map(invertindex, attrs))\n\n idxs_ = {key : attrs[i] for i, key in enumerate(idxs_.keys())}\n idxs[rel] = idxs_\n return idxs", "def setup_normalyzer_header(design_matrix: DF, annot_cols: List[str], normalyzer_vals:DF) -> DF:\n\n # Get numbers set up as list of stringified numbers ('-1', '0', '0', '1', '1')\n nbr_annot_cols = len(annot_cols)\n sample_head = [-1] + [0] * (nbr_annot_cols - 1) + list(design_matrix['biorepgroup'])\n sample_head_str = [str(e) for e in sample_head]\n\n # Get text-information about each column\n label_row = list(normalyzer_vals.columns)[:nbr_annot_cols] + list(design_matrix['name'])\n\n headers = pd.DataFrame([sample_head_str, label_row])\n headers.columns = normalyzer_vals.columns\n\n return headers", "def _index_column_set(column_set: List[ColumnSchema]) -> List[Tuple[str, int]]:\n out = defaultdict(int)\n for column_schema in column_set:\n key = column_schema_to_keys(column_schema)\n out[key] += 1\n return list(out.items())", "def indices_get_mapping(es):\n index = 'customer'\n\n print(es.indices.get_mapping(index=index))", "def create_class_indices(self) -> None:\n\n categories = list(np.loadtxt(self.path_manager.categories_file(), delimiter=\",\", dtype=str))\n\n if self.include_noise_samples and not self.multi_label_classification:\n categories.append(\"noise\")\n\n self.class_to_idx = {}\n\n for idx, class_name in enumerate(sorted(categories)):\n self.class_to_idx[class_name] = idx", "def mask_header(data_mask, header_mask):\n\n mask = {}\n text = {'bad': 'BP', 'edge': 'EP', 'saturated': 'SP',\n 'saturated-connected': 'SCP', 'satellite trail': 'STP',\n 'cosmic ray': 'CRP'}\n\n for mask_type in text.keys():\n value = get_par(set_zogy.mask_value[mask_type],tel)\n mask[mask_type] = (data_mask & value == value)\n header_mask['M-{}'.format(text[mask_type])] = (\n True, '{} pixels included in mask?'.format(mask_type))\n header_mask['M-{}VAL'.format(text[mask_type])] = (\n value, 'value added to mask for {} pixels'.format(mask_type))\n header_mask['M-{}NUM'.format(text[mask_type])] = (\n np.sum(mask[mask_type]), 'number of {} pixels'.format(mask_type))\n\n return" ]
[ "0.60388774", "0.5985021", "0.5983487", "0.5812272", "0.573689", "0.57180965", "0.5631869", "0.55975854", "0.55681896", "0.5565681", "0.554784", "0.5505632", "0.5441756", "0.5394642", "0.53900725", "0.53899986", "0.5387639", "0.537637", "0.5363915", "0.53568316", "0.53414994", "0.53264475", "0.53180736", "0.52968967", "0.5296696", "0.52717334", "0.523432", "0.52338445", "0.5221073", "0.5197063", "0.51961637", "0.51677465", "0.51670617", "0.5143451", "0.5141884", "0.51299953", "0.5112296", "0.50799704", "0.5070182", "0.50370145", "0.50309104", "0.50275165", "0.5022587", "0.5021352", "0.5011724", "0.501067", "0.5006809", "0.49966705", "0.49958965", "0.4993834", "0.49890873", "0.4977726", "0.49753785", "0.49732548", "0.49728718", "0.49683043", "0.4963201", "0.49614576", "0.4956038", "0.4955202", "0.49545345", "0.49531713", "0.49473414", "0.49463058", "0.49354705", "0.49321753", "0.49301812", "0.49239618", "0.4923402", "0.49156016", "0.49089992", "0.4907962", "0.49009293", "0.4899426", "0.489764", "0.48914996", "0.48797262", "0.4871322", "0.48672453", "0.48628187", "0.4862035", "0.4850852", "0.4849413", "0.48449454", "0.4843381", "0.484153", "0.48386338", "0.48382095", "0.48382095", "0.48362377", "0.48265663", "0.48213154", "0.48168445", "0.4811637", "0.48025075", "0.47980416", "0.47882167", "0.47843325", "0.47802198", "0.4780186" ]
0.6957255
0
Generates mapping from water measurements column names to values of the given CSV row.
def get_water_value_map(row, column_names_map):
    column_values_map = column_names_map.copy()
    row_length = len(row)
    empty = True
    for key, index in column_names_map.items():
        # Check if non-empty value exist for given index.
        if -1 < index < row_length:
            value = row[index].strip()
            if value:
                column_values_map[key] = value
                empty = False
                continue
        # Else NULL is inserted in db.
        column_values_map[key] = 'NULL'
    return None if empty else column_values_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping", "def create_deft_table_csv_mappings():\n mappings = list()\n mappings.append(CsvColumnMapping(columnName=\"rownumber\", cslDataType=\"int\", ordinal=0))\n mappings.append(CsvColumnMapping(columnName=\"rowguid\", cslDataType=\"string\", ordinal=1))\n mappings.append(CsvColumnMapping(columnName=\"xdouble\", cslDataType=\"real\", ordinal=2))\n mappings.append(CsvColumnMapping(columnName=\"xfloat\", cslDataType=\"real\", ordinal=3))\n mappings.append(CsvColumnMapping(columnName=\"xbool\", cslDataType=\"bool\", ordinal=4))\n mappings.append(CsvColumnMapping(columnName=\"xint16\", cslDataType=\"int\", ordinal=5))\n mappings.append(CsvColumnMapping(columnName=\"xint32\", cslDataType=\"int\", ordinal=6))\n mappings.append(CsvColumnMapping(columnName=\"xint64\", cslDataType=\"long\", ordinal=7))\n mappings.append(CsvColumnMapping(columnName=\"xuint8\", cslDataType=\"long\", ordinal=8))\n mappings.append(CsvColumnMapping(columnName=\"xuint16\", cslDataType=\"long\", ordinal=9))\n mappings.append(CsvColumnMapping(columnName=\"xuint32\", cslDataType=\"long\", ordinal=10))\n mappings.append(CsvColumnMapping(columnName=\"xuint64\", cslDataType=\"long\", ordinal=11))\n mappings.append(CsvColumnMapping(columnName=\"xdate\", cslDataType=\"datetime\", ordinal=12))\n mappings.append(CsvColumnMapping(columnName=\"xsmalltext\", cslDataType=\"string\", ordinal=13))\n mappings.append(CsvColumnMapping(columnName=\"xtext\", cslDataType=\"string\", ordinal=14))\n mappings.append(CsvColumnMapping(columnName=\"xnumberAsText\", cslDataType=\"string\", ordinal=15))\n mappings.append(CsvColumnMapping(columnName=\"xtime\", cslDataType=\"timespan\", ordinal=16))\n mappings.append(CsvColumnMapping(columnName=\"xtextWithNulls\", cslDataType=\"string\", ordinal=17))\n mappings.append(CsvColumnMapping(columnName=\"xdynamicWithNulls\", cslDataType=\"dynamic\", ordinal=18))\n return mappings", "def tsvRowToDict(row):\n return {col: getattr(row, col) for col in row._columns_}", "def _properties_from_csv_row(row, header, ignored_columns):\n props = {}\n for h, prop in enumerate(header):\n # Handle a strange edge case where the length of the row is longer than the length of the header.\n # We do this to prevent an out of range error.\n x = h\n if x > len(row) - 1:\n x = len(row) - 1\n if row[x] == '' or prop in ignored_columns:\n continue\n else:\n try:\n # We use literal_eval() here to de-stringify numbers, lists and objects in the CSV data\n p = literal_eval(row[x])\n props[prop] = p\n except (SyntaxError, ValueError) as e:\n props[prop] = row[x]\n return props", "def make_dict(row):\n return dict((key[0], value) for key, value in zip(colnames, row))", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def read_name_map( name_map_path) :\n with open( name_map_path, newline=\"\") as csvfile:\n table = { }\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) < 2:\n continue\n if row[key_col] == 
key_header:\n continue\n key = row[key_col]\n val = row[val_col]\n table[key] = val\n return table", "def process_csv():\n csv_rows = []\n fieldnames = ['site',\n 'latitude',\n 'longitude',\n 'city',\n 'region_code',\n 'country_code',\n 'continent_code',\n 'min_ip_hex',\n 'max_ip_hex',\n 'transit_provider',\n 'min_ip',\n 'max_ip',\n 'ip_prefix',\n 'min_ipv6_hex',\n 'max_ipv6_hex',\n 'min_ipv6',\n 'max_ipv6',\n 'ipv6_prefix']\n\n location_map = build_location_map()\n\n # Read in the CSV file and augment the columns\n with open(INPUT_FILE, 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n\n for row in reader:\n csv_rows.append(process_row(row, location_map))\n\n # Write the new CSV file with new columns\n with open(OUTPUT_FILE, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for row in csv_rows:\n writer.writerow(row)\n\n print(\"MLab Sites CSV generated at {0}\".format(OUTPUT_FILE))", "def create_waves_dict(csv_file):\n with open(csv_file) as file:\n reader = csv.DictReader(file)\n waves_dict = {row[\"Date\"]: row[\"Wave Height\"] for row in reader}\n return waves_dict", "def parse_csv_row(self, row):\n\n for key in self.field_map:\n if self.field_map[key] is not None:\n if key == 'marking':\n self.obstacle_data[key] = self.get_marking_value(row[self.field_map[key]].strip())\n elif key == 'lighting':\n self.obstacle_data[key] = self.get_lighting_value(row[self.field_map[key]].strip())\n elif key == 'obst_type':\n self.obstacle_data['obst_type_id'] = self.get_obstacle_type_id(row[self.field_map[key]].strip())\n else:\n self.obstacle_data[key] = row[self.field_map[key]].strip()", "def get_m_to_me_metabolite_mapping():\n f = pandas.read_csv(fixpath(\"m_to_me_mets.csv\"), index_col=0)[\"me_name\"]\n return f.dropna().to_dict()", "def map_csv_fields(self):\n etod_csv_fields = {\n 'ctry_id': None,\n 'obst_identifier': None,\n 'obst_name': None,\n 'lon_src': None,\n 'lat_src': None,\n 'agl': None,\n 'amsl': None,\n 'vert_uom': None,\n 'hor_acc': None,\n 'hor_acc_uom': None,\n 'vert_acc': None,\n 'vert_acc_uom': None,\n 'obst_type': None,\n 'lighting': None,\n 'marking': None,\n 'is_group': None,\n }\n\n for field in etod_csv_fields:\n try:\n etod_csv_fields[field] = etod_map[self.ctry_short_name]['fields'][field]\n except KeyError:\n etod_csv_fields[field] = None\n\n self.field_map = etod_csv_fields", "def map_line(reader, headers):\n\n readings = {}\n\n line = reader.readline()\n\n if len(line) == 0:\n raise EOFError('That\\'s all the data!')\n\n line = line.rstrip()\n\n value_strings = line.split(' ')\n for i, string in enumerate(value_strings):\n if string != 'NaN':\n value = float(string)\n\n if i < len(headers):\n if headers[i]['is_point']:\n value = get_decimal_degrees(value)\n key = headers[i]['name'] + \"-\" + headers[i]['units']\n readings[key] = value\n\n # Provide generic timestamp regardless of type for iterator\n # convenience\n # Keep originals for those interested\n if 'm_present_time-timestamp' in readings:\n readings['timestamp'] = readings['m_present_time-timestamp']\n elif 'sci_m_present_time-timestamp' in readings:\n readings['timestamp'] = readings['sci_m_present_time-timestamp']\n\n return readings", "def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping", "def conform_input_data(rowdict):\n # 
rowdict['Value'] = float(rowdict['Value'])\n rowdict['TimeStamp'] = TS_to_date(rowdict['TimeStamp'][:19])\n for floatcolumn in ['LowPx','OpenPx','ClosePx','QuoteCount','HighPx','TradeCount']:\n if floatcolumn in rowdict:\n rowdict[floatcolumn] = float(rowdict[floatcolumn])\n return rowdict", "def table_row_to_dict(row, make_quantity=True):\n data = {}\n for name, col in row.columns.items():\n val = row[name]\n\n if make_quantity and col.unit:\n val = Quantity(val, unit=col.unit)\n data[name] = val\n return data", "def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))", "def make_row_map(file_path, key_field, field_map=None, transforms=None, \\\n file_encoding=None):\n\n with open(file_path, encoding=file_encoding) as file:\n # preprocess transforms\n if transforms:\n _transforms = {}\n for tf_field, tf in transforms.items():\n _type = type(tf).__name__\n if _type not in ['str', 'function']:\n raise ValueError('Invalid transform')\n _transforms[tf_field] = {\n 'transform': tf,\n 'type': _type\n }\n\n # get fields from csv\n fields_reader = csv.reader(file)\n fields = next(fields_reader)\n\n # make sure we aren't missing any field names\n first_row = next(fields_reader)\n if len(fields) != len(first_row):\n raise ValueError('Header has a different number of columns than data')\n\n # apply field map\n if field_map:\n # TODO use a case insensitive dictionary for field map\n fields = [field_map.get(field.lower()) or field for field in fields]\n key_field = field_map.get(key_field) or key_field\n\n # lowercase\n fields = [field.lower() for field in fields]\n\n # handle spaces\n fields = [field.replace(' ', '_') for field in fields]\n\n # use namedtuple for rows\n fields_joined = ' '.join(fields)\n Row = namedtuple('Row', fields_joined)\n\n # make map\n row_map = {}\n reader = csv.DictReader(file, fieldnames=fields)\n\n for i, row in enumerate(reader):\n key = row[key_field]\n\n # apply transforms\n if transforms:\n for tf_field, tf_map in _transforms.items():\n tf = tf_map['transform']\n tf_type = tf_map['type']\n source_val = row[tf_field]\n if tf_type == 'str':\n val = getattr(source_val, tf)()\n else:\n val = tf(source_val)\n row[tf_field] = val\n\n # row_map[key] = row\n # str_row = {key: str(val) for key, val in row.items()}\n row_map[key] = Row(**row)\n # from pprint import pprint\n # pprint(str_row)\n # row_map[key] = Row(**str_row)\n\n return row_map", "def list_water_temps(csv_file):\n with open(csv_file) as file:\n reader = csv.DictReader(file)\n temp_list = [temp[\"Water Temp\"] for temp in reader]\n return temp_list", "def create_dict_from_file(filename, delimeters, first_char, column_names):\n\n # This opens the\n measurement_output = open('measurement_output.txt', \"w\", encoding=\"utf8\")\n # This creates and initializes a list to serve as a dictionary container outside of the for-loop.\n measurements_file_container = {}\n\n # This opens the file and then splits it (preserving the commas because of the landfall count requirement).\n if not filename.endswith('.txt'):\n print('Input File Must Be a .txt File')\n return None\n elif 
delimeters != '{}=|{}=|{}='.format(column_names[0], column_names[1], column_names[2]):\n print('Please Check Syntax for Delimeters and colunm_names.')\n return None\n else:\n with open(filename, 'r') as infile:\n for line in infile:\n line = line.strip()\n # This checks to see if line begins with a numeric character; if so, it is a header for a new measurement.\n if line[0].isnumeric():\n measurement_current_line = line.split()\n # This initializes a new measurement dictionary with the 3 items in column_names\n key = measurement_current_line[0]\n new_measurement_dictionary = {\n column_names[0]: '0',\n column_names[1]: '0',\n column_names[2]: '0',\n }\n #print(measurement_current_line)\n # this determines if a line starts with 'X', splits it at the X =,Y =,Z = indicators\n # to spit out a list containing only the 3 values and then updates the corresponding\n # value in the dictionary\n if line[0] == first_char:\n measurement_current_line = re.split(delimeters, line.strip(' '))\n if len(measurement_current_line) == 4:\n new_measurement_dictionary[column_names[0]] = float(measurement_current_line[1].strip())\n new_measurement_dictionary[column_names[1]] = float(measurement_current_line[2].strip())\n new_measurement_dictionary[column_names[2]] = float(measurement_current_line[3].strip())\n measurements_file_container[key] = new_measurement_dictionary\n # this stops the processing when the end of data key '$$EOE' is reached.\n elif line == '$$EOE':\n break\n\n\n return(measurements_file_container)", "def mapper(self, line_no, line):\n cell = csv_readline(line)\n if cell[0] == 'V':\n yield cell[4],1", "def gen_dict():\n lines = [line for line in csv.reader(open(__ppath__ + \"/data/occupations.csv\"))] # uses a csv.reader to parse the file, converts the generic iterable to a list\n lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and \"Total\" row, re-expresses as a list of tuples to enable dictionary conversion\n lines.append((\"Unemployed\",0.2)) # accounts for missing 0.2% of jobs\n return dict(lines) # converts to dictionary", "def columnar(row_table: list[dict[str, str]]) -> dict[str, list[str]]:\n result: dict[str, list[str]] = {}\n \n first_row: dict[str, str] = row_table[0]\n for column in first_row:\n result[column] = column_values(row_table, column)\n \n return result", "def write_row(row: dict):\n row = {k: format_float(v) for k, v in row.items()}\n writer.writerow(row)\n csvfile.flush()", "def loadData(self, aircraftCSV='aircraft.csv'):\n aircraftDict = {}\n \n with open(aircraftCSV, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for line in reader:\n #if imperial convert to metric\n if line[2] == 'imperial':\n range = float(line[4]) * 8 / 5\n else:\n range = float(line[4])\n aircraftDict[line[0]] = [line[1], line[3], range]\n self.aircraftDict = aircraftDict", "def process_columns(tup: tuple):\n column_name, data, source_name, data_type, quantiles = tup\n column = Column(column_name, data, source_name, data_type, quantiles)\n print(\"Processing column: \", column.get_long_name())\n column.quantile_histogram = QuantileHistogram(column.get_long_name(), column.ranks, column.size, quantiles)\n with open('cache/' + column.get_long_name() + '.pkl', 'wb') as output:\n pickle.dump(column, output, pickle.HIGHEST_PROTOCOL)", "def _deduct_types(cls, row: Row) -> ColumnTypes:\n return {\n key: get_value_type(cls.whole_number_to_int(value))\n for key, value in row.items()\n }", "def map_csv_dimensions(length: str, width: str, height: str):\n return {\n 
\"length\": length,\n \"width\": width,\n \"height\": height\n }", "def getHourlyWeatherFromCSV(self,town,scale,key):\n\n\t\t# Variables\n\t\tfile = \"data/weather/\"+town+\"_\"+scale+\".csv\"\n\t\tcsv_data = []\n\t\tweather_data = []\n\t\tweather = {}\n\n\t\t# Reading csv file and storing data in file\n\t\twith open(file) as csvfile:\n\t\t\treader = csv.DictReader(csvfile)\n\t\t\tfor row in reader:\n\t\t\t\tcsv_data.append(row) \n\t\t# Getting data that is needed for visualization\n\n\t\tprint csv_data\n\n\t\tfor data in csv_data:\n\t\t\t# Parsing date\n\t\t\thour = int(data[\"date\"].split(\" \")[4].split(\":\")[0])\n\t\t\tpm_or_am = data[\"date\"].split(\" \")[5]\n\t\t\tday = data[\"date\"].split(\",\")[0]\n\t\t\tif hour == 12 and pm_or_am == \"AM\":\n\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" 00:00\"\n\t\t\telif hour < 10 and pm_or_am == \"AM\":\n\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" 0\" + str(hour) + \":00\"\n\t\t\telif hour >= 10 and pm_or_am == \"AM\":\n\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" \" + str(hour) + \":00\"\n\t\t\tif pm_or_am == \"PM\":\n\t\t\t\tif hour == 12: \n\t\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" \" + str(hour) + \":00\"\n\t\t\t\telse:\n\t\t\t\t\thour +=12\n\t\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" \" + str(hour) + \":00\"\n\t\t\tweather[\"date\"] = data[\"date\"]\n\n\t\t\t# Appending weather data\n\t\t\tweather[key] = data[key]\n\t\t\tweather_data.append(weather)\n\t\t\tweather = {}\n\t\treturn weather_data", "def parse_trick_ascii(csv_file):\n data_file = csv.DictReader(open(csv_file))\n single_run_data_dict = {'altitude' : [0.0],\n 'latitude' : [0.0],\n 'longitude' : [0.0]}\n # Your code here\n # ...\n # return the dict\n return single_run_data_dict", "def _process_csv_data(csv_file, user_data_map):\n with open(csv_file, 'r') as csvfile:\n rows = csv.reader(csvfile)\n for row in rows:\n if len(row) < 2:\n print('The CSV file is not in expected format.')\n raise Exception\n user_data_map[row[1].lower()] = row[0]", "def mapper(self, _, doc):\n ret = doc.split('\\t')\n key = ret[2]\n values = {}\n try:\n values[\"ts_ini\"] = datetime.utcfromtimestamp(float(ret[0]))\n except:\n values[\"ts_ini\"] = None\n try:\n values[\"ts_end\"] = datetime.utcfromtimestamp(float(ret[1]))\n except:\n values[\"ts_end\"] = None\n try:\n values[\"value\"] = ret[3]\n except:\n values[\"value\"] = None\n try:\n values[\"energytype\"] = ret[4]\n except:\n values[\"energytype\"] = None\n try:\n values[\"source\"] = ret[5]\n except:\n values[\"source\"] = None\n\n yield key, values", "def _get_cell_map(row: TRowResult) -> Dict[bytes, TCell]:\n if row.sortedColumns is not None:\n return {c.columnName: c.cell for c in row.sortedColumns}\n elif row.columns is not None:\n return row.columns\n else: # pragma: no cover\n raise RuntimeError(\"Neither columns nor sortedColumns is available!\")", "def SAM_CSV_to_solar_data(filename):\n if not os.path.isfile(filename):\n raise FileNotFoundError(filename + \" does not exist.\")\n wfd = defaultdict(list)\n with open(filename) as file_in:\n info = []\n for i in range(2):\n info.append(file_in.readline().rstrip())\n info[i] = info[i].split(\",\")\n if \"Time Zone\" not in info[0]:\n raise ValueError(\"`Time Zone` field not found in solar resource file.\")\n latitude = info[1][info[0].index(\"Latitude\")]\n longitude = info[1][info[0].index(\"Longitude\")]\n tz = 
info[1][info[0].index(\"Time Zone\")]\n elev = info[1][info[0].index(\"Elevation\")]\n source = info[1][info[0].index(\"Source\")]\n reader = csv.DictReader(file_in)\n for row in reader:\n for col, dat in row.items():\n if len(col) > 0:\n wfd[col].append(float(dat))\n\n weather = dict()\n weather['tz'] = float(tz)\n weather['elev'] = float(elev)\n weather['lat'] = float(latitude)\n weather['lon'] = float(longitude)\n\n # Create dict with keys = keys passed to SAM and values = list of possible key versions found in resource files (NREL / NASA POWER)\n acceptable_keys = {\n 'year' : ['year', 'Year', 'yr'],\n 'month' : ['month', 'Month', 'mo'],\n 'day' : ['day', 'Day'],\n 'hour' : ['hour', 'Hour', 'hr'],\n 'minute' : ['minute', 'Minute', 'min'],\n 'dn' : ['dn', 'DNI','dni', 'beam', 'direct normal', 'direct normal irradiance'],\n 'df' : ['df', 'DHI', 'dhi', 'diffuse', 'diffuse horizontal', 'diffuse horizontal irradiance'],\n 'gh' : ['gh', 'GHI','ghi', 'global', 'global horizontal', 'global horizontal irradiance'],\n 'wspd' : ['wspd', 'Wind Speed', 'wind speed'],\n 'tdry' : ['tdry', 'Temperature', 'dry bulb', 'dry bulb temp', 'temperature', 'ambient', 'ambient temp'],\n 'wdir' : ['wdir', 'Wind Direction', 'wind direction'],\n 'pres' : ['pres', 'Pressure', 'pressure'],\n 'tdew' : ['tdew', 'Dew Point', 'Tdew', 'dew point', 'dew point temperature'],\n 'rhum' : ['rhum', 'Relative Humidity', 'rh', 'RH', 'relative humidity', 'humidity'],\n 'alb' : ['alb', 'Surface Albedo', 'albedo', 'surface albedo'],\n 'snow' : ['snow', 'Snow Depth', 'snow depth', 'snow cover']\n }\n \n # enumerates acceptable_keys, inserts key and values into weather dictionary if found in the resource file\n for key, list_of_keys in acceptable_keys.items():\n for good_key in list_of_keys:\n if good_key in wfd.keys():\n weather[key] = wfd.pop(good_key)\n break\n\n # handles averaged hourly data with no minute column provided by NASA POWER and removes 2/29 data for leap years\n # this is a workaround so PySAM/SAM processes as instantaneous data (not setup to handle no minute column)\n if source == 'NASA/POWER':\n weather['minute'] = [30] * len(weather['hour'])\n if len(weather['hour']) == 8784:\n for key in weather.keys():\n if key not in ['tz','elev','lat','lon']:\n del weather[key][1416:1440]\n\n\n return weather", "def _get_values(uprow, types):\n tmp_uprow = {k: _for_pgsql(v, types[k]) for k, v in uprow.items()}\n mappable = \",\".join([\"{\" + str(k) + \"}\" for k in uprow.keys()])\n values = mappable.format(**tmp_uprow)\n return values", "def cell_map_from_csv(self, source_file: str) -> None:\n if source_file[-4:] == '.csv':\n try:\n self._import_source_data(source_file)\n except Exception:\n print(\"Problem with that CSV file. 
File extension?\")", "def map_column_values(self, table: Table, column: Column, name: str, *args):\n self._requires_table(table)\n\n values = []\n for index in table.index:\n cell = table.get_cell(index, column)\n output = BuiltIn().run_keyword(name, cell, *args)\n values.append(output)\n\n table.set_column(column, values)", "def mapRow(this_row, header_dict, precursors_mapping, sequences_mapping, protein_mapping):\n\n if \"FullPeptideName\" in header_dict:\n\n peptide_name = this_row[header_dict[\"FullPeptideName\"]]\n\n transitions = []\n pr_transitions = []\n if \"aggr_Fragment_Annotation\" in header_dict:\n transitions = this_row[ header_dict[\"aggr_Fragment_Annotation\"] ].split(\";\")\n if \"aggr_prec_Fragment_Annotation\" in header_dict:\n pr_transitions = this_row[ header_dict[\"aggr_prec_Fragment_Annotation\"] ].split(\";\")\n\n # Skip row if there are no transitions\n if len(transitions) == 0:\n return\n\n if len(transitions[-1]) == 0:\n transitions = transitions[:-1]\n if len(pr_transitions) > 0 and len(pr_transitions[-1]) == 0:\n pr_transitions = pr_transitions[:-1]\n\n # Get charge state (may be absent)\n charge_state = \"0\"\n if \"Charge\" in header_dict:\n charge_state = this_row[header_dict[\"Charge\"]]\n\n if charge_state == \"NA\" or charge_state == \"\":\n charge_state = \"0\"\n\n key = peptide_name + \"/\" + charge_state\n prkey = peptide_name + \"/\" + charge_state + \"_pr\"\n precursors_mapping [ key ] = transitions\n precursors_mapping [ prkey ] = pr_transitions\n mapped_precursors = sequences_mapping.get( peptide_name, [] )\n mapped_precursors.extend([key, prkey])\n sequences_mapping[peptide_name] = mapped_precursors # = [ key, prkey ]\n\n if \"ProteinName\" in header_dict:\n protein_name = this_row[header_dict[\"ProteinName\"]]\n\n tmp = protein_mapping.get(protein_name, [])\n if peptide_name not in tmp:\n tmp.append(peptide_name)\n protein_mapping[protein_name] = tmp", "def csv_dict_reader(file_obj):\r\n with open('heatmap_data_10_200_out.csv','wb') as file:\r\n\t reader = csv.DictReader(file_obj, delimiter=',')\r\n\t for line in reader:\r\n\t \t# data = \"{location: new google.maps.LatLng(\" + str(line[\"latitude\"]) + \", \" + str(line[\"longitude\"]) + \") , weight: \" + str(float(line[\"predicted_price\"])/1000) + \" }, \"\r\n\t \tdata = line[\"predicted_price\"] + \";\" + str(line[\"latitude\"]) + \"; \" + str(line[\"longitude\"]) \r\n\t \t# data = \"new google.maps.LatLng(\" + str(line[\"latitude\"]) + \", \" + str(line[\"longitude\"]) + \"),\"\r\n\t \tprint data\r\n\t # print(line[\"latitude\"]),\r\n\t # print(line[\"longitude\"])\r\n\r\n\t \r\n\t \tfile.write(data)\r\n\t \tfile.write('\\n')", "def load_csv(file):\n import csv\n reader = csv.reader(open(file, 'r'))\n columns = reader.next()\n c2i = dict((columns[i], i) for i in range(len(columns)))\n data = {}\n excluded = set([REP_CSV_HED_TIM, REP_CSV_HED_HER])\n for row in reader:\n \n # get relevant info from the line\n time = float(row[c2i[REP_CSV_HED_TIM]])\n hero = row[c2i[REP_CSV_HED_HER]]\n other = dict((c, REP_CSV_HANDLERS.get(c, REP_CSV_DEFHANDLER)(row[c2i[c]])) for c in columns if c not in excluded)\n \n # add to the data dictionary\n if hero not in data: data[hero] = []\n data[hero].append([time] + [other])\n \n return data", "def read_features_dict(path):\n # type_dict specifies the type conversion to be applied. Each key denotes\n # a column name and the value is the conversion. 
Columns not included are\n # converted to floats.\n type_dict = {'source': str, 'target': str, 'status': int}\n with open(path) as feature_file:\n reader = csv.DictReader(feature_file, delimiter='\\t')\n for row in reader:\n yield {key: type_dict.get(key, float)(value) for key, value in row.items()}", "def map_field_value(\n row: DLCSRecord, field_name: str, config: typing.Dict\n) -> typing.Any:\n mapping: mapper.MappigDictValue = mapper.FIELD_MAPPING[field_name]\n\n if mapping is None:\n return None\n\n if callable(mapping):\n return mapping(row)\n\n if isinstance(mapping, str):\n mapping = [mapping]\n\n if not isinstance(mapping, typing.Collection):\n raise TypeError(\n f\"FIELD_MAPPING[field_name] must be iterable, unless it is None, Callable, or a string.\"\n )\n\n output: typing.List[str] = []\n for csv_field in mapping:\n input_value = row.get(csv_field)\n if input_value:\n if isinstance(input_value, str):\n output.extend(input_value.split(\"|~|\"))\n else:\n output.append(input_value)\n\n bare_field_name = get_bare_field_name(field_name)\n if bare_field_name in config.get(\"controlled_fields\", {}):\n terms = config[\"controlled_fields\"][bare_field_name][\"terms\"]\n output = [terms.get(value, value) for value in output]\n\n return [value for value in output if value] # remove untruthy values like ''", "def process_row(row, location_map):\n\n # Add in IP columns\n ips = get_ip_extent(row[IP_PREFIX_COLUMN])\n ipv6_ips = get_ip_extent(row[IPV6_PREFIX_COLUMN])\n\n continent_code = CONTINTENT_CODES[row[CONTINENT_COLUMN]] if row[CONTINENT_COLUMN] in CONTINTENT_CODES else None\n site_id = row[SITE_COLUMN]\n\n location = location_map[site_id.lower()]\n\n # simplify\n row = {\n 'site': site_id.lower(),\n 'latitude': location['latitude'],\n 'longitude': location['longitude'],\n 'city': row[CITY_COLUMN],\n 'region_code': row[REGION_CODE_COLUMN],\n 'country_code': row[COUNTRY_CODE_COLUMN],\n 'continent_code': continent_code,\n 'min_ip_hex': hex_encode_ip(ips[0]) if ips else None,\n 'max_ip_hex': hex_encode_ip(ips[-1]) if ips else None,\n 'transit_provider': row[TRANSIT_PROVIDER],\n 'min_ip': str(ips[0]) if ips else None,\n 'max_ip': str(ips[-1]) if ips else None,\n 'ip_prefix': normalize_ip(row[IP_PREFIX_COLUMN]),\n 'min_ipv6_hex': hex_encode_ip(ipv6_ips[0]) if ipv6_ips else None,\n 'max_ipv6_hex': hex_encode_ip(ipv6_ips[-1]) if ipv6_ips else None,\n 'min_ipv6': str(ipv6_ips[0]) if ipv6_ips else None,\n 'max_ipv6': str(ipv6_ips[-1]) if ipv6_ips else None,\n 'ipv6_prefix': normalize_ip(row[IPV6_PREFIX_COLUMN]),\n }\n\n return row", "def create_table(f, geoinfo):\n bounds_cols = xb_points + yb_points\n df = pd.read_csv(f, delimiter=\";\", index_col=\"INDEX_RC\")\n df[duration_name] = parse_duration_level(f)\n df = df.join(geoinfo[[\"X_CENT_GEO\", \"Y_CENT_GEO\", \"Col\", \"Row\"]])\n df = df.rename(columns={\"Col\": x, \"Row\": y, \"X_CENT_GEO\": lon, \"Y_CENT_GEO\": lat})\n return df", "def read_file(file):\n \n dictionary = {}\n csv_fp = csv.reader(file)\n #L[46] = manufacturer, L[63] = year\n #L[4]= city mileage, L[34]=highway mileage\n for line in csv_fp:\n #Skip the headings and the year 2017\n if (not (line[46] == 'make')) and (not (line[63] == '2017')):\n if line[46] in dictionary:\n #Add the city and highway mileage if the year has been made\n if line[63] in dictionary[line[46]]:\n dictionary[line[46]][line[63]][0] += [int(line[4])]\n dictionary[line[46]][line[63]][1] += [int(line[34])]\n #Add the year and data if it was not made previously\n else:\n dictionary[line[46]][line[63]] = 
[[int(line[4])],\\\n [int(line[34])]]\n #Adds a new manufacturer\n else:\n dictionary[line[46]] = {line[63]:[[int(line[4])],\\\n [int(line[34])]]}\n return dictionary", "def read_delsys_csv(filename: str) -> Dict[str, Dict[str, TimeSeries]]:\n # Check the number of rows to skip\n n_rows = 0\n with open(filename, 'r') as fid:\n while True:\n s = fid.readline()\n if s.startswith('X[s]'):\n break\n else:\n n_rows += 1\n\n # Open the CSV\n df = pd.read_csv(filename, skiprows=n_rows)\n\n # Create a TimeSeries for each signal since they all have different time\n # vectors\n n_signals = int(len(df.columns) / 2)\n\n emg = {}\n acc = {}\n gyro = {}\n mag = {}\n\n for i_signal in range(n_signals):\n time = df.iloc[:, i_signal * 2].to_numpy()\n name = df.columns[i_signal * 2 + 1]\n data = df.iloc[:, i_signal * 2 + 1].to_numpy()\n\n if ': Acc' in name:\n short_name = name\n ts = TimeSeries(time=time, data={short_name: data})\n acc[short_name] = ts\n elif ': Mag' in name:\n short_name = name\n ts = TimeSeries(time=time, data={short_name: data})\n mag[short_name] = ts\n elif ': Gyro' in name:\n short_name = name\n ts = TimeSeries(time=time, data={short_name: data})\n gyro[short_name] = ts\n elif ': EMG' in name:\n short_name = name.split(':')[0]\n ts = TimeSeries(time=time, data={short_name: data})\n emg[short_name] = ts\n\n return {'emg': emg, 'acc': acc, 'gyro': gyro, 'mag': mag}", "def get_rows(csv):\n\n labels = csv[0][2:].split(',')\n\n # Convert each row into a hash of label: value\n return [dict(zip(labels, row.split(','))) for row in csv[1:]]", "def csv2dicts(csvfile, names=None):\n data = []\n for row_index, row in enumerate(csvfile):\n if row_index == 0:\n if names:\n keys = names\n else:\n keys = row\n print(keys)\n continue\n data.append({key: value for key, value in zip(keys, row)})\n return data", "def extract_sensors_data(dataframe, ms_column='ms_ticker',\n time_column = 'Tstamp',\n ppg_columns=['led_1', 'led_2'],\n acc_columns=['acc_x', 'acc_y', 'acc_z']):\n\n sensors_dict = {}\n sensors = dataframe.loc[1:, 1:]\n sensors_columns = dataframe.head(1).values[0]\n sensors_columns = [i.replace(\" \", \"\") for i in sensors_columns if i.find('Index') == -1]\n sensors.columns = sensors_columns\n check_columns_exist(ppg_columns, sensors_columns)\n check_columns_exist(acc_columns, sensors_columns)\n check_columns_exist(ms_column, sensors_columns)\n check_columns_exist(time_column, sensors_columns)\n ppg = np.array(sensors[ppg_columns].values[1:, :], dtype=int)\n ms = np.array(sensors[ms_column].values[1:, ])\n ms_ints = np.array([int(str(i)[-3:]) for i in ms], dtype=float)\n ms_delta = [datetime.timedelta(milliseconds=i) for i in ms_ints]\n\n time = dataframe.loc[:,1].values[1:]\n time = np.array([pd.to_datetime(i) for i in time])\n time_with_ms = np.array(ms_delta) + time\n\n sensors_dict['PPG'] = ppg\n sensors_dict['time_sensors'] = time_with_ms.astype('datetime64[us]')\n sensors_dict['ms_ticker_sensors'] = ms\n acc = np.array(sensors[acc_columns].values[1:, :], dtype=float)\n sensors_dict['ACC'] = acc\n\n return sensors_dict", "def csv2dict(filename):\n dis_dict = {}\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n el_a = row[\"Element Name\"]\n dis_dict[el_a] = {}\n for entry in row:\n if entry != \"Element Name\":\n dis_dict[el_a][entry] = float(row[entry])\n csvfile.close()\n return dis_dict", "def generic_mapper(fname, path, delimiter='\\t'):\n\tout = {'filename':fname}\n\t_, meta_data = read_radiant_txt(path+fname, delimiter=delimiter, 
return_meta_data=True)\n\tkeys = ['Type', 'SampleName', 'SampleArea(cm2)', 'SampleThickness(um)', 'Volts', 'Field', 'PulseWidth(ms)', 'PulseDelay(ms)']\n\tfor key in keys:\n\t\ttry:\n\t\t\tout.update({key:float(meta_data[key])})\n\t\texcept ValueError:\n\t\t\tout.update({key:meta_data[key]})\n\t\texcept KeyError:\n\t\t\tout.update({key:np.nan})\n\t\t\t\n\t# period is a weird one\n\ttry:\n\t\tout.update({'Period(ms)':float(meta_data['{}Period(ms)'.format(meta_data['Type'].capitalize())])})\n\texcept:\n\t\tpass\n\treturn out", "def format_row(rowvals, dtypes, colnames):\n mapped_vals = []\n for colnum, val in enumerate(rowvals):\n pgtype = dtype_to_pgtype(dtypes[colnum], colnames[colnum])\n mapped_vals.append(numpy_val_to_pg_val(val, pgtype))\n\n return ','.join(mapped_vals)", "def agline(line):\n\n vals = {}\n x = ['date', 'movie', 'offset', 'well', 'gen', 'flarem', 'flares', \n 'chargem', 'charges', 'charget', 'escdm', 'escds', 'escddur', 'escmm', 'escms', \n 'escmdur']\n y = line.strip('\\n').split(',')[0:16]\n z = zip(x, y)\n\n for item in z:\n vals[item[0]] = item[1]\n\n return(vals)", "def _row_to_dict(row, fields):\n dict_row = {}\n for i, value in enumerate(row):\n key = fields[i]\n if value and str(value).lower() == 'nan':\n value = None\n dict_row[key] = value\n return dict_row", "def read_polyidus_csv(filename):\n\twith open(filename, newline = '') as filehandle:\n\t\t\n\t\t# create DictReader objects for inputs and read into memory\n\t\treader = csv.DictReader(filehandle, delimiter = '\\t')\n\t\tdata = []\n\t\tread_ids = []\n\t\t\n\t\tfor row in reader:\n\t\t\trow_data = {}\n\t\t\trow_data['Chr'] = row['Chrom']\n\t\t\trow_data['VirusRef'] = row['ChromVirus']\n\t\t\trow_data['OverlapType'] = 'none'\n\t\t\trow_data['Type'] = 'chimeric'\n\t\t\t\n\t\t\thPosition = int(row['IntegrationSite'])\n\t\t\tvPosition = int(row['ViralIntegrationSite'])\n\t\t\t#hOris = row['StrandHost'].split(', ')\n\t\t\treadIDs = row['FragmentName'].split(', ')\n\t\t\t\n\t\t\t# make one row per read, if we haven't already used this read\n\t\t\tfor i, read in enumerate(readIDs):\n\t\t\t\tif read not in read_ids:\n\t\t\t\t\tread_ids.append(read)\n\t\t\t\t\t\n\t\t\t\t\t# need to make copy of dict\n\t\t\t\t\trow_data = dict(row_data)\n\t\t\t\t\t\n\t\t\t\t\t# add info about this read to dict\n\t\t\t\t\trow_data['IntStart'] = hPosition\n\t\t\t\t\trow_data['IntStop'] = hPosition\n\t\t\t\t\trow_data['VirusStart'] = vPosition\n\t\t\t\t\trow_data['VirusStop'] = vPosition\n\t\t\t\t\t#row_data['Orientation'] = 'hv' if hOris[i] == \"Positive\" else 'vh'\n\t\t\t\t\trow_data['type'] = 'chimeric'\n\t\t\t\t\trow_data['ReadID'] = read\n\t\t\t\t\t\n\t\t\t\t\tdata.append(row_data)\n\n\treturn data", "def _getValues(self):\n res = {}\n for colname, column in self._iterNameColumn():\n res[colname] = column.toSql(self._values[colname])\n return res", "def create_assumption_map(columns, df):\n assumption_map = pd.DataFrame(columns=columns)\n\n for fuel in fuel_types:\n for value in values:\n if fuel == \"coal\" and value == \"delta_capex\":\n retrofit = True\n else:\n retrofit = False\n array = values_array(df, fuel, value, retrofit=retrofit)\n mean, trim_mean_v, std, top_p, bot_p = value_stats(array)\n\n if value == \"delta_capex\":\n units = \"2019€_KW\"\n elif value == \"delta_om\":\n units = \"2019€_KWh\"\n elif value == \"delta_heatrate\":\n units = \"KW_KWh\"\n assumption_map = assumption_map.append({\"fuel_type\": fuel, \"value\": value, \"range_low\": bot_p,\n \"range_high\": top_p, \"reference_value\": mean, \"units\": 
units},\n ignore_index=True)\n return assumption_map", "def parse_column_units(\n filepath: Path, sheet_name: str = None\n) -> Dict[str, ureg.Quantity]:\n method = map_input_method(filepath)\n unit_header = method(str(filepath), nrows=1, header=0)\n given_unit = lambda s: unit_header[s].values[0]\n parsed_units = {}\n required_columns = [\n \"Supply Temperature\",\n \"Target Temperature\",\n \"Heat Capacity Flowrate\",\n \"Enthalpy\",\n ]\n\n for column in required_columns:\n units = ureg.parse_units(given_unit(column))\n parsed_units[column] = units\n\n return parsed_units", "def input_row():\n return {\n 'foo': 1,\n 'bar': 2,\n 'spam': 3,\n 'eggs': 4\n }", "def process_csv(self):\n with open(self.filepath, mode=\"r\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n header = next(csv_reader)\n\n date_idx = self._get_header_position(header, \"Label\")\n power_idx = self._get_header_position(header, \"kW\")\n\n if self.inverter_id not in header[power_idx]:\n raise Exception(\"Inverter data returned for the incorrect meter.\")\n\n for row in csv_reader:\n date_obj = self.csv_str_to_date(row[date_idx])\n power = float(row[power_idx] or 0)\n\n current_date = self.date_to_final_str(date_obj)\n rounded_time = self.round_up_to_quarter_hour(date_obj)\n\n if current_date not in self.intermediate_readings:\n self.intermediate_readings[\n current_date\n ] = self.build_intermediate_dict()\n\n current_reading = self.intermediate_readings[current_date][rounded_time]\n # Here's where we sum power readings together - in to fifteen min intervals\n self.intermediate_readings[current_date][rounded_time] = (\n current_reading + power\n )\n\n actual_time = self.date_to_intermediate_time_str(date_obj)\n if rounded_time == actual_time:\n # Here's where we average power readings together, in fifteen minute intervals\n self.intermediate_readings[current_date][rounded_time] = round(\n float(\n self.intermediate_readings[current_date][rounded_time] / 3\n ),\n 2,\n )\n\n return self.finalize_readings()", "def getRiverIDs(lookupCsv):\n\n d = {}\n with open(lookupCsv, \"rb\") as f:\n reader = csv.reader(f)\n\n # Discard header row\n reader.next()\n\n for row in reader:\n d[row[0]] = row[1]\n\n return d", "def _row_to_labels(row):\n labels = {}\n label_keys = ['name', 'qty', 'range_end', 'unit', 'comment']\n for key in label_keys:\n labels[key] = row[key]\n return labels", "def load_columns(self, csv_data):\n column_date = []\n column_time = []\n column_hold = []\n column_outcome = []\n for row in dataframe_to_rows(csv_data, index=False):\n cell_date = row[18]\n cell_date = cell_date.split(': ')[1]\n cell_time = row[23]\n cell_hold = row[24]\n cell_outcome = row[25]\n column_date.append(cell_date)\n column_time.append(cell_time)\n column_hold.append(cell_hold)\n column_outcome.append(cell_outcome)\n return column_date, column_time, column_hold, column_outcome", "def _convert_genres(self, df, genre_map):\n genre_transfrm = pd.read_csv(genre_map)\n genre = {}\n for row_label, row_array_Series in genre_transfrm.iterrows():\n genre[row_array_Series['Old']] = row_array_Series['New']\n\n df['MappedGenres'] = df['Genres'].map(genre)\n try:\n assert df['MappedGenres'].notna().all()\n except:\n print(\"WARNING: The Genre CSV needs to be updated. 
The following Genres need to be mapped: \")\n print([*df['Genres'][df['MappedGenres'].isna()].values])", "def meta_properties(self, date_col=\"Date\", type_col=\"Primary Type\", lat_col=\"Latitude\",\\\n lon_col=\"Longitude\", loc_col=\"Location\", out_fname=\"data_formated.csv\"):\n # implement keywords\n # would we have to deal w/ file w/o headers?\n data = pd.read_csv(self._path, usecols=[date_col, type_col, lat_col, lon_col, loc_col],\\\n parse_dates=[date_col], infer_datetime_format=True)\n data.sort_values(date_col, inplace=True)\n min_date = data.iloc[0][date_col]\n max_date = data.iloc[(data.shape[0]-1)][date_col]\n\n lat = []\n lon = []\n\n nulls = []\n for row in data.itertuples(index=True, name='Pandas'):\n index = (row.Index)\n # if lat, lon = nan, drop the row\n # update: confirmed that issue is with code, not with data; for some reason\n # csv is actually correctly grabbing location, there just legitimately are\n # entries w/o location data\n if pd.isnull(getattr(row, loc_col)):\n # print(\"row: {} got a {} for the 'Location' column with date: {}\".format(index, \\\n # getattr(row, loc_col), getattr(row, date_col)))\n if not pd.isnull(getattr(row, lat_col)) and not pd.isnull(getattr(row, lon_col)):\n lat.append(str(getattr(row, lat_col)))\n lon.append(str(getattr(row, lon_col)))\n if \",\" in data.loc[index, type_col]:\n data.loc[index, type_col] = data.loc[index, type_col].replace(\",\", \" \")\n print(\\\n \"Successfully extracted lat, lon from lat_col, lon_col for row: {}\".format(index))\n else:\n nulls.append((index, getattr(row, date_col)))\n data.drop(index, inplace=True)\n # print(\"No location data available for row: {} with date: {}\".format(index,\\\n # getattr(row, date_col)))\n else:\n loc = literal_eval(getattr(row, loc_col))\n lat.append(loc[0])\n lon.append(loc[1])\n if \",\" in data.loc[index, type_col]:\n data.loc[index, type_col] = data.loc[index, type_col].replace(\",\", \" \")\n\n data[\"Latitude\"] = lat\n data[\"Longitude\"] = lon\n data.drop(loc_col, axis=1, inplace=True)\n\n data.sort_values(\"Latitude\", inplace=True)\n min_lat = float(data.iloc[0][\"Latitude\"])\n max_lat = float(data.iloc[(data.shape[0]-1)][\"Latitude\"])\n\n data.sort_values(\"Longitude\", inplace=True)\n min_lon = float(data.iloc[0][\"Longitude\"])\n max_lon = float(data.iloc[(data.shape[0]-1)][\"Longitude\"])\n\n data.to_csv(self.__file_dir+'/'+out_fname, na_rep=\"\", header=False, index=False)\n\n attrs = {'min_date': min_date, 'max_date': max_date, \"min_lat\":min_lat,\\\n \"max_lat\":max_lat, \"min_lon\":min_lon, \"max_lon\":max_lon, \\\n \"dates\":pd.date_range(min_date, max_date), \"num_attributes\": data.shape[1],\\\n \"num_entries\":data.shape[0]}\n self._meta_dict = attrs\n self._meta_dict['df'] = data\n pickle.dump(data, open(CWD + \"/meta_dict.p\", \"wb\"))\n print(\"Num entries w/o location data: {}\".format(len(nulls)))\n pickle.dump(nulls, open(CWD + \"/nulls.p\", \"wb\"))\n\n # not include the formatted dataset?\n return attrs", "def counters(line):\n reader = csv.reader(StringIO(line), delimiter=',')\n row = reader.next()\n\n airport = row[4]\n\n try:\n delay = float(row[8])\n except ValueError:\n delay = 0.0\n\n return (airport, (1, delay, delay ** 2, delay, delay))", "def column_values(table: list[dict[str, str]], column_name: str) -> list[str]:\n column_values: list[str] = []\n for row in table:\n item: str = row[column_name]\n column_values.append(item)\n return column_values", "def _gen_data(fhs, columns, sep):\n for fh in fhs:\n for line in fh:\n if line[0] 
== \"#\": continue\n toks = line.split(sep)\n yield toks[columns[0]], int(toks[columns[1]]), float(toks[columns[2]])", "def load_csv(input_filename_state):\n dataset = {}\n with open(input_filename_state) as f:\n reader = csv.reader(f)\n header = next(reader, None)\n\n location_col = -1\n week_ahead_col = -1\n quantile_col = -1\n value_col = -1 \n\n\n for i in range(len(header)):\n if header[i] == \"place\":\n location_col = i\n elif header[i] == \"week_ahead\":\n week_ahead_col = i\n elif header[i] == \"quantile\":\n quantile_col = i \n elif header[i] == \"value\":\n value_col = i\n \n for row in reader:\n state = row[location_col]\n\n # Skip the state if it is not listed in reichlab's state list.\n if state not in STATE_ID_MAPPING:\n continue\n state_id = STATE_ID_MAPPING[state]\n week_ahead = int(row[week_ahead_col])\n quantile = row[quantile_col]\n val = max(float(row[value_col]), 0)\n if week_ahead not in dataset:\n dataset[week_ahead] = {}\n if state_id not in dataset[week_ahead]:\n dataset[week_ahead][state_id] = {}\n dataset[week_ahead][state_id][quantile] = val\n return dataset", "def _compute_from_table_values(\n name=\"plant\", color=(0/255, 128/255, 0/255),\n germination_time=(3, 1), \n r_max=(1.0,1.0),\n maturation_time=(10, 1),\n stopping_color=(0, 0, 1),\n color_step=(10/255, 0, 0),\n c1=0.1,\n r_0=0.04\n ):\n\n c2 = 1\n k1, k2 = 0.3, 0.7\n unoccluded_c1 = c1 / k2\n h_0 = 0.1\n r_max = max(1, np.random.normal(MAX_RADIUS[name][0], MAX_RADIUS[name][1]))\n # r_max = MAX_RADIUS[name][0] + MAX_RADIUS[name][1]\n growth_time = generate_growth_time(germination_time, maturation_time, r_max, r_0, k2, c2)\n\n return {\n \"germination_time\": germination_time,\n \"maturation_time\": maturation_time,\n \"k1\": k1,\n \"k2\": k2,\n \"c1\": unoccluded_c1,\n \"c2\": c2,\n \"start_radius\": r_0,\n \"start_height\": h_0,\n \"r_max\": r_max,\n \"growth_time\": growth_time,\n \"plant_type\": name,\n \"color\": color,\n \"stopping_color\": stopping_color,\n \"color_step\": color_step\n }", "def daily_speed_sum_map(data):\n\t(byte_offset, line_value) = data\n\tcolumns = split_into_columns(line_value)\n\tif columns[3] != 'speed':\n\t\tyield (\"%s_%s\" % (columns[0], columns[1][:10]), columns[3])", "def column_name(name):\n # Only needs exceptions to standard token cleanup\n column_map = {\n \"line#\" : \"ignore\",\n \"date\" : \"timestamp\",\n \"rh\" : \"humidity\",\n \"par\" : \"par_ue\"\n }\n\n if name in column_map:\n return column_map[name]\n \n return name", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def to_python(self):\r\n mapping = {}\r\n for row in self.rows:\r\n mapping[row[0]] = _format_python_value(row[1])\r\n return mapping", "def load(filename):\n with open(filename,'r') as fd:\n csv_in = csv.reader(fd, delimiter=',', quotechar='\"')\n keys = csv_in.next()\n data = {k:[] for k in keys}\n for row in csv_in:\n for k,v in zip(keys,row):\n data[k].append(float(v))\n return data", "def row_to_colKey(row):\n return [(i[0], (row[0], i[1])) for i in row[1]]", "def func_value(file):\n \n with open( file ) as f:\n csv_reader = csv.DictReader(f)\n \n for row in csv_reader:\n Cl , Cd = row[' \"CL\" '] , row[' \"CD\" ']\n \n return float(Cl) 
, float(Cd)", "def dict_factory(self, cursor, row):\n results = {}\n for index, col_name in enumerate(cursor.description):\n results[col_name[0]] = row[index]\n\n return results", "def mapper(self, _, line):\n linea = line.split()\n causa, fallecidos = linea[0], linea[1]\n fallecidos_f = float(fallecidos)\n \n yield causa, (1, round(fallecidos_f))", "def map_row(row, mapping, model_class, extra_data_fields=[], cleaner=None, **kwargs):\n initial_data = kwargs.get('initial_data', None)\n model = model_class()\n\n # _log.debug(\"map_row's mappings {}\".format(mapping))\n\n # If there are any initial states we need to set prior to mapping.\n if initial_data:\n model = apply_initial_data(model, initial_data)\n\n # concat is not used as of 2016-09-14\n # concat = _set_default_concat_config(concat)\n\n for raw_field, value in row.items():\n is_extra_data = True if raw_field in extra_data_fields else False\n\n # Save the value if is is not None, keep empty fields.\n if value is not None:\n model = apply_column_value(raw_field, value, model, mapping, is_extra_data, cleaner)\n\n return model", "def get_data(file_name):\n csv_file = open(file_name, 'rb')\n train_content = csv.reader(csv_file)\n\n # ignore header\n train_content.next()\n\n # preprocessing functions for each column index\n # Several preprocessing can be defined for each column.\n # A new variable is associated to EACH preprocessing function\n preproc_funcs = {0: ['get_hour']}\n\n # Read data from file, store it as an integer\n data = []\n for row in train_content:\n data_row = []\n for n, col in enumerate(row):\n # if the current column requires preprocessing functions, apply them\n if preproc_funcs.has_key(n):\n # Each preprocessing give a new column\n for preproc_func in preproc_funcs[n]:\n func = globals().get(preproc_func)\n data_row.append(int(float(func(col))))\n # If no preprocessing, do nothing\n else:\n data_row.append(int(float(col)))\n\n data.append(data_row)\n\n csv_file.close()\n\n return data", "def get_mapping_info(in_path):\n if not utils.exists_file(in_path):\n utils.exit(f\"Mapping file does not exist:\\n{in_path}\")\n df = pd.read_csv(in_path, sep=\";\", header=None)\n mapping = {}\n case_video_count = {}\n for index, row in df.iterrows():\n # parts = row[0].split('_')\n # video_id = \"-1\"\n # if len(parts) == 1 or \"video\" not in parts[0]:\n # video_id = row[0]\n # else:\n # video_id = parts[1]\n # video_id = video_id.replace('.mp4', '')\n video_id = row[0]\n case_id = int(row[2])\n mapping[f\"{video_id}\"] = case_id\n utils.increment_dict_key(case_video_count, f\"{case_id}\")\n return mapping, case_video_count", "def read_csv(file_name):\n data = {}\n with open(file_name) as f:\n f = MyIter(f)\n try:\n for line in f:\n if not line.strip():\n continue\n if line == 'Points\\n':\n break\n key, val = read_key_value(line, separator=',')\n key = key.lower().replace(' ', '_')\n data[key] = val\n\n x_units, y_units = next(f).split(',')\n data['x_units'], data['y_units'] = x_units.strip(), y_units.strip()\n\n xs, ys = [], []\n for line in f:\n x, y = line.split(',')\n xs.append(float(x.strip()))\n ys.append(float(y.strip()))\n except Exception as e:\n print(f'Error on line {f._index}')\n print(f._line)\n raise e\n\n elong = Elongation(\n np.array(xs), np.array(ys),\n float(data['gauge_length']),\n float(data['sample_width']),\n float(data['sample_thickness'])\n )\n return [elong]", "def getColumnDictionary(self):\n try:\n column_dictionary = []\n con = self.getMetadataDatabaseConnection()\n column_values = con.cursor()\n 
con.cursor().callproc('qiime_assets.get_column_dictionary', [column_values])\n for row in column_values:\n # Skip if no column name is found\n if row[0] is None:\n continue\n\n # Some variables to allow for re-assignment should any of them be None\n column_name = row[0].lower()\n expected_values = row[1]\n description = row[2]\n data_type = row[3]\n max_length = row[4]\n min_length = row[5]\n active = row[6]\n \n if row[1] == None:\n expected_values == ''\n elif row[2] == None:\n description == ''\n elif row[3] == None:\n data_type = ''\n elif row[4] == None:\n max_length = ''\n elif row[5] == None:\n min_length = ''\n elif row[6] == None:\n min_length = ''\n \n list_item = (column_name, expected_values, description, data_type, max_length, min_length, active)\n column_dictionary.append(list_item)\n return column_dictionary\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data", "def build_sample_map(flowcell):\n result = {}\n rows = [(lane, lib[\"name\"]) for lib in flowcell[\"libraries\"] for lane in lib[\"lanes\"]]\n i = 1\n for _, name in sorted(set(rows)):\n if name not in result:\n result[name] = \"S{}\".format(i)\n i += 1\n return result", "def get_header(header_row):\n header = {}\n header['station'], c1, c2, c3, date, time, tz = header_row.split()\n header['short_model'] = c1\n header['model'] = f'{c1} {c2} {c3}' \n header['runtime'] = dateutil.parser.parse(f'{date} {time} {tz}')\n return header", "def test_csvfile_get_columns(fs: FakeFilesystem) -> None:\n fs.create_file(\"test.csv\", contents=CONTENTS)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"index\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.ASCENDING,\n exact=True,\n ),\n \"temperature\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n \"site\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }", "def creating_dict_from_csv(self) -> dict:\n dictionary = {}\n for row in self.__read_csv():\n if dictionary.get(row[0]):\n dictionary[row[0]].append((row[1], row[2]))\n else:\n dictionary[row[0]] = [(row[1], row[2])]\n\n for key, value in dictionary.items():\n dictionary[key] = sorted(value, key=lambda x: x[1], reverse=True)\n\n return dictionary", "def dictionary_formation():\r\n sales_data = {}\r\n with open('beer_data.csv', \"r\") as data_file:\r\n file_contents = csv.reader(data_file, delimiter=',')\r\n #Value of lines_read used as key value for each dictionary\r\n #in sales_data\r\n lines_read = 1\r\n for line in file_contents:\r\n if lines_read == 1:\r\n lines_read = lines_read + 1\r\n else:\r\n #Stores each column in row as key value in dictionary\r\n sales_data[str(lines_read)] = {\r\n \"invoice_number\": line[0],\r\n \"customer\": line[1],\r\n \"date_required\": line[2],\r\n \"recipe\": line[3],\r\n \"gyle_number\": line[4],\r\n \"quantity_ordered\": int(line[5])\r\n }\r\n lines_read = lines_read + 1\r\n data_file.close()\r\n return sales_data", "def distance(row):\n return row['distance']", "def data_parser(df):\n\n chunks = []\n for row in df.itertuples():\n piece = {}\n 
piece['datetime'] = row[1]\n piece[row[3]] = row[4]\n chunks.append(piece)\n\n # Join dicts on shared 'datetime' keys.\n combine = defaultdict(dict)\n for elem in chunks:\n combine[elem['datetime']].update(elem)\n\n ordered = sorted(combine.values(), key=itemgetter(\"datetime\"))\n\n mapped_generation = []\n for item in ordered:\n mapped_types = [(mapping.get(k, k), v) for k, v in item.items()]\n\n # Need to avoid multiple 'unknown' keys overwriting.\n complete_production = defaultdict(lambda: 0.0)\n for key, val in mapped_types:\n try:\n complete_production[key] += val\n except TypeError:\n # Datetime is a string at this point!\n complete_production[key] = val\n\n dt = complete_production.pop('datetime')\n final = (dt, dict(complete_production))\n mapped_generation.append(final)\n\n return mapped_generation", "def readcsv(csvreader,item:int,count:int):\n\n headerstring = next(csvreader)\n data = {}\n if item >= len(headerstring) or item < 0 or count >= len(headerstring) or count < 0:\n \n sys.stderr.write(\"your index values are out of bounds\")\n\n else:\n header = headerstring[item] + ',' + headerstring[count]\n for i in csvreader:\n key = i[item]\n try:\n if key in data:\n data[key] = float(data[key]) + float(i[count])\n else:\n data[key] = float(i[count])\n except ValueError:\n if key in data:\n data[key]+=0\n else:\n data[key] = 0\n return data,header", "def get_positions(directory): \n positions = {}\n names = {}\n pos_dict = {'1': \"GK\", '2': \"DEF\", '3': \"MID\", '4': \"FWD\"}\n fin = open(directory + \"/players_raw.csv\", 'rU',encoding=\"utf-8\")\n reader = csv.DictReader(fin)\n for row in reader:\n positions[int(row['id'])] = pos_dict[row['element_type']] \n names[int(row['id'])] = row['first_name'] + ' ' + row['second_name']\n return names, positions", "def cuttoff_column_generator(A: dict, columns: list, threshold: float):\n for column_name in columns:\n with open('cache/' + column_name + '.pkl', 'rb') as pkl_file:\n column = pickle.load(pkl_file)\n yield A, column, threshold", "def parse_csv(data):\n\n # scan for CSRs first, so it's easier to resolve CSR-related constants\n # in the second pass\n for _type, _name, _address, _, __ in data:\n if _type == 'csr_base':\n peripherals[_name] = {'name': _name,\n 'address': _address,\n 'constants': {}}\n\n for _type, _name, _val, _val2, _ in data:\n if _type == 'csr_base':\n # CSRs have already been parsed\n pass\n elif _type == 'csr_register':\n # we are currently not interested in this\n pass\n elif _type == 'constant':\n found = False\n for _csr_name in peripherals:\n if _name.startswith(_csr_name):\n local_name = _name[len(_csr_name)+1:]\n peripherals[_csr_name]['constants'][local_name] = _val\n found = True\n break\n if not found:\n # if it's not a CSR-related constant, it must be a global one\n constants[_name] = {'name': _name, 'value': _val}\n elif _type == 'memory_region':\n mem_regions[_name] = {'name': _name,\n 'address': _val,\n 'size': _val2}\n else:\n print('Skipping unexpected CSV entry: {} {}'.format(_type, _name))", "def columndict_callback(c):\r\n\r\n def subQ(txt):\r\n return re.sub(r\"'([^']*)'\", r\"`\\1'\", (txt or '')).replace(r'|', r'\\|')\r\n\r\n return {\r\n 'name' : c.name,\r\n 'type' : c.type,\r\n 'nullable' : c.nullable,\r\n 'value' : subQ(c.value),\r\n 'default' : subQ(c.default),\r\n 'defaultf' : (\"\\n\\n*Default: %s*\" % subQ(c.default))\r\n if c.default else '',\r\n 'notnull' : '' if c.nullable else ' not null',\r\n 'desc': c.desc,\r\n 'descf': preformat_coldesc(c.desc), \r\n }", "def 
readExperimentParameters(filename): \n \n csvFile = csv.reader(open(filename))\n pythonSucks = next(csvFile)\n \n data = []\n for row in csvFile:\n list = []\n for i in range(len(row)):\n if(row[i+1] == \"\"):\n list.append(int(row[i]))\n break\n elif(row[i+1] == \"h\"):\n list.append(int(row[i]) * 60)\n break\n else:\n list.append(int(row[i]))\n \n data.append(tuple(list))\n return data", "def nomenclatura():\n df = pd.read_csv(\"Data/nomenclatura_1.csv\", encoding = \"latin1\")\n #dict_axis = df.set_index('id').T.to_dict('list')\n dict_axis = dict( [ (i, [a,b]) for i, a,b in zip(df.id, df.latitude, df.longitude) ] )\n\n return dict_axis", "def read_names_into_dict():\n d = dict()\n with open(\"SP_500_firms.csv\") as csvfile:\n input_file = csv.DictReader(csvfile)\n for row in input_file:\n #print(row)\n d[row['Symbol']] = [row['Name'],row['Sector']]\n return d" ]
[ "0.62421864", "0.57918304", "0.5663072", "0.5657489", "0.56149256", "0.5612935", "0.5567542", "0.5544542", "0.5500382", "0.5480754", "0.545259", "0.544672", "0.5385878", "0.53811884", "0.5380167", "0.5332743", "0.53060913", "0.5292364", "0.527309", "0.52203315", "0.5217865", "0.5204893", "0.51956916", "0.51823163", "0.5171073", "0.51400554", "0.5128875", "0.5127345", "0.5125955", "0.5108462", "0.50986177", "0.50906026", "0.50851643", "0.507701", "0.5058715", "0.5047698", "0.50358063", "0.5025449", "0.50119126", "0.5000118", "0.49874973", "0.49859902", "0.49794003", "0.49692628", "0.49686694", "0.49564287", "0.49562824", "0.4944726", "0.49357253", "0.4931802", "0.49308315", "0.4929742", "0.49268916", "0.49139735", "0.49004272", "0.48895863", "0.4879501", "0.48757288", "0.4849314", "0.48488393", "0.48460367", "0.48438165", "0.48395935", "0.48366308", "0.4826754", "0.48108453", "0.480941", "0.48063457", "0.47943133", "0.47894722", "0.47881287", "0.4784646", "0.47833073", "0.47790784", "0.47728813", "0.47690222", "0.4764609", "0.47631037", "0.4759319", "0.47532785", "0.4744826", "0.47333178", "0.47310844", "0.47217765", "0.47126067", "0.47113746", "0.47110203", "0.4708233", "0.4706068", "0.47044417", "0.47009397", "0.47003907", "0.46994057", "0.46980676", "0.46959034", "0.46934825", "0.468143", "0.467427", "0.46720108", "0.46703273" ]
0.62722737
0
Populate water measurements table for selected `archive`, `directory` and `stations`.
def populate_water_measurements(cursor, archive, directory, station): csv_path = get_data_path( 'water', 'raw', archive, directory, f'{station}.csv' ) with open(csv_path, 'r', encoding='utf-8') as file: reader = csv.reader(file, delimiter=';') header = next(reader) column_names_map = get_water_index_map(archive, header) if not column_names_map: return False water_body = get_water_definitions(archive)['body'] for row in reader: column_values_map = get_water_value_map(row, column_names_map) if column_values_map: date = datetime.strptime(row[0], '%d.%m.%Y').date() data_columns = ', '.join(column_values_map.keys()) data_values = ', '.join(column_values_map.values()) cursor.execute(f'''INSERT INTO {water_body}_measurements (station_id, date, {data_columns}) VALUES ({station}, '{str(date)}', {data_values})''') return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')\n aquifer_count = cursor.fetchone()[0]\n\n if watercourse_count and aquifer_count:\n print('Water tables already populated!')\n return\n\n station_data = get_station_data()\n\n for archive in metadata.keys():\n print(f'{archive}-water:'.upper())\n water_body = get_water_definitions(archive)['body']\n\n # 1. Populate watercourses/aquifers:\n stations = {}\n for water_body_name in metadata[archive].keys():\n print(f'\\tPopulating {water_body}: \"{water_body_name}\"')\n cursor.execute(f'''INSERT INTO {water_body}s(location_id, name)\n VALUES (0, '{water_body_name}')''')\n water_body_id = cursor.lastrowid\n\n # 2. Populate watercourse_stations/aquifer_stations:\n for station_id in metadata[archive][water_body_name]['stations']:\n station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name'])\n\n if station_id in stations:\n # Prefer watercourses/aquifer with more stations\n current_len = len(metadata[archive][water_body_name]['stations'])\n previous_len = len(metadata[archive][stations[station_id]]['stations'])\n\n if current_len < previous_len:\n print(f'\\t\\tStation already exists: {station_id} - \"{station_name}\" (\"{water_body_name}\")')\n continue\n else:\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station: {station_id} - \"{station_name}\" from \"{stations[station_id]}\")')\n\n stations[station_id] = water_body_name\n print(f'\\t\\tPopulating station: {station_id} - \"{station_name}\"')\n\n # Insert station location if station data exists.\n location_id = 0\n station_row = station_data.query(f'ŠIFRA == \"{station_id}\"')\n if not station_row.empty:\n index = station_row.index[0]\n lat = station_row.at[index, 'LAT']\n lng = station_row.at[index, 'LON']\n if not np.isnan(lat) and not np.isnan(lng):\n name = f\"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})\"\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{name}', {lat}, {lng})''')\n location_id = cursor.lastrowid\n\n # Insert station.\n cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name)\n VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''')\n\n # 3. 
Populate watercourse_measurements/aquifer_measurements:\n if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'],\n station_id):\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station with useless data: {station_id} - \"{station_name}\"')\n\n # Remove empty watercourses/aquifers.\n cursor.execute(f'''SELECT w.id, w.name\n FROM {water_body}s w\n WHERE NOT EXISTS (\n SELECT s.id \n FROM {water_body}_stations s \n WHERE w.id = s.{water_body}_id\n )''')\n\n for row in cursor.fetchall():\n cursor.execute(f'''DELETE \n FROM {water_body}s\n WHERE id = {row[0]}''')\n print(f'\\tRemoved empty {water_body}: \"{row[1]}\"')", "def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_count:\n print('Weather tables already populated!')\n return\n\n print('WEATHER:')\n\n # Darksky data\n for dir_name, location in metadata.items():\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"','\".join(location[water_body])}')''')\n break\n\n dir_path = get_data_path('weather', 'raw', dir_name)\n for json_file_name in os.listdir(dir_path):\n json_path = os.path.join(dir_path, json_file_name)\n with open(json_path, 'r', encoding='utf-8') as json_file:\n print(f'\\t\\tPopulating year: {json_file_name[0:-5]}')\n year_forecasts = json.load(json_file)\n for date, date_forecast in year_forecasts.items():\n hourly_forecasts = date_forecast['hourly']\n\n if not hourly_forecasts:\n print(f'\\t\\tNo hourly forecasts for {date}!')\n continue\n\n daily_forecast = {\n 'location_id': location_id,\n 'time': date_forecast['time'],\n 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],\n 'precipitation': 0,\n 'snow_accumulation': 0\n }\n # List of value names with `avg`, `min` and `max` values\n value_names = {\n 'temperature': 'temperature',\n 'cloud_cover': 'cloudCover',\n 'dew_point': 'dewPoint',\n 'humidity': 'humidity',\n 'pressure': 'pressure',\n 'uv_index': 'uvIndex',\n 'precipitation_probability': 'precipProbability',\n 'precipitation_intensity': 'precipIntensity'\n }\n # Value name counters, which indicate how many times (out of 24)\n # certain value appears in hourly data.\n value_counts = {k: 0 for k in value_names.keys()}\n\n for value_name in value_names.keys():\n daily_forecast[f'{value_name}_avg'] = 0.0\n daily_forecast[f'{value_name}_min'] = float('inf')\n daily_forecast[f'{value_name}_max'] = float('-inf')\n\n # Calculate daily forecast values from hourly forecasts.\n for hourly_forecast in hourly_forecasts:\n for value_name in value_names.keys():\n orig_value_name = value_names[value_name]\n if is_forecast_number(orig_value_name, hourly_forecast):\n daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]\n daily_forecast[f'{value_name}_min'] = min(\n hourly_forecast[orig_value_name],\n 
daily_forecast[f'{value_name}_min']\n )\n daily_forecast[f'{value_name}_max'] = max(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_max']\n )\n value_counts[value_name] += 1\n\n if is_forecast_number('precipAccumulation', hourly_forecast) \\\n and hourly_forecast['precipType'] == 'snow':\n daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']\n elif is_forecast_number('precipIntensity', hourly_forecast) \\\n and is_forecast_number('precipProbability', hourly_forecast):\n daily_forecast['precipitation'] += \\\n hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']\n\n for value_name, value_count in value_counts.items():\n if value_count:\n # Calculate average.\n daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count\n else:\n # If value never appeared\n daily_forecast[f'{value_name}_avg'] = 'NULL'\n daily_forecast[f'{value_name}_min'] = 'NULL'\n daily_forecast[f'{value_name}_max'] = 'NULL'\n\n cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})\n VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')\n\n # IOT data:\n for location in SETTINGS['weather_locations_iot']:\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"', '\".join(location[water_body])}')''')\n\n # Set locations for all stations on given water body to match its location.\n cursor.execute(f'''SELECT id\n FROM {water_body}s\n WHERE location_id = {location_id}''')\n ids = [row[0] for row in cursor.fetchall()]\n if len(ids):\n cursor.execute(f'''UPDATE {water_body}_stations\n SET location_id = {location_id}\n WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')\n\n break \n \n file_name = f'''{location['lat']}-{location['lng']}.json'''\n json_path = get_data_path('weather', 'raw', file_name)\n\n # If data file doesn't exist, download it first.\n if not os.path.isfile(json_path):\n with open(json_path, 'wb', encoding=\"utf-8\") as file:\n file.write(read_from_url(location['url'], decode=False))\n \n with open(json_path, 'r', encoding='utf-8') as json_file:\n row_names = {\n \"Sun_duration\": \"sun_duration\",\n \"CloudCover\": \"cloud_cover_avg\",\n \"Percipitation\": \"precipitation\",\n \"New_snow_blanket\": \"snow_accumulation\",\n \"Snow_blanket\": \"snow_depth\",\n \"TemperatureAvg\": \"temperature_avg\",\n \"TemperatureMin\": \"temperature_min\",\n \"TemperatureMax\": \"temperature_max\"\n }\n forecasts = json.load(json_file)\n for forecast in forecasts:\n f = {row_names[k]: forecast[k] for k in row_names.keys()}\n f['location_id'] = location_id\n f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)\n cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})\n VALUES ({', '.join([str(v) for v in f.values()])})''')", "def main(daymet_dir,pickles,start_date='1980-10-01',end_date='2020-09-30',huc_col = 'huc8', **kwargs):\r\n\tprint(f'The huc col being processed is: {huc_col}')\r\n\t################################################################\r\n\t#first do the daymet data \r\n\t#read in all the files in this dir and combine them into one 
df\r\n\tearly=FormatData(glob.glob(daymet_dir+f'*_12_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tmid=FormatData(glob.glob(daymet_dir+f'*_2_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tlate=FormatData(glob.glob(daymet_dir+f'*_4_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\t################################################################\r\n\t#next do the snotel data \r\n\toutput=[]\r\n\r\n\t#read in some pickled objects, these look like a list of dfs with each being a station for the full time period \r\n\tfor item in ['PREC','TAVG','WTEQ']:\r\n\t\t#get the pickled objects for each parameter \r\n\t\tfiles = glob.glob(pickles+f'*{item}_{start_date}_{end_date}_snotel_data_list') #hardcoded currently\r\n\t\tdf=FormatData(files,drop_cols=['year','month','day']).read_in_pickles()\r\n\t\toutput.append(df) #the df here is 365 days x ~30 yrs x 237 stations so these are pretty big dfs\r\n\t\r\n\t#join the three enviro params \r\n\toutput_df = reduce(lambda left,right: pd.merge(left,right,how='inner',on=['date','id']), output)\r\n\t\r\n\t\r\n\t#convert the temp column from F to C \r\n\toutput_df['TAVG'] = (output_df['TAVG']-32)*(5/9) \r\n\t#there are a couple of erroneous temp values, remove those \r\n\toutput_df = output_df.loc[output_df['TAVG'] <= 50]\r\n\r\n\t#convert prec and swe cols from inches to cm \r\n\toutput_df['PREC'] = output_df['PREC']*2.54\r\n\toutput_df['WTEQ'] = output_df['WTEQ']*2.54\r\n\t\r\n\t#remove rows that have one of the data types missing- this might need to be amended because \r\n\t#it means that there are different numbers of records in some of the periods. \r\n\toutput_df=output_df.dropna()\r\n\t\r\n\t#cast the snotel id col to int to add the hucs \r\n\toutput_df['id'] = output_df['id'].astype('int')\r\n\r\n\t#add the as yet nonexistant hucs data to the outputs \r\n\thucs = kwargs.get('hucs')\r\n\toutput_df[huc_col] = output_df['id'].map(hucs)\r\n\r\n\t#there are multiple snotel stations in some of the basins, \r\n\t#combine those so there is just one number per basin like the \r\n\t#daymet and RS data. 
\r\n\r\n\toutput_df=output_df.groupby([huc_col,'date'])[['PREC','WTEQ','TAVG']].mean().reset_index()\r\n\r\n\tperiod_list = []\r\n\tfor p1,p2 in zip(['early','mid','late'],[early,mid,late]): \r\n\t\t\t#get snotel first\r\n\t\t#make a temporal chunk of data \r\n\t\tsnotel_chunk=FormatData(None,time_period=p1).split_yearly_data(output_df)\r\n\r\n\t\t##########working below here\r\n\t\t############################\r\n\t\t#calculate the snow droughts for that chunk \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\t\t#print('snotel')\r\n\t\t\t#print(snotel_drought)\r\n\t\telse: \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',sort_col=huc_col).prepare_df_cols()\r\n\r\n\t\t#get cols of interest \r\n\t\t#snotel_drought=snotel_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t#rename cols so they don't get confused when data are merged \r\n\t\t#snotel_drought.columns=['huc8','year']+['s_'+column for column in snotel_drought.columns if not (column =='huc8') | (column=='year')]\r\n\t\t\r\n\t\t#then do the same for daymet \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\telse: \r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,sort_col=huc_col).prepare_df_cols()\r\n\t\t#print('daymet',daymet_drought)\r\n\t\t#daymet_drought=daymet_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t\r\n\t\t#daymet_drought.columns=['huc8','year']+['d_'+column for column in daymet_drought.columns if not (column =='huc8') | (column=='year')]\r\n\r\n\t##########################################\r\n\t\r\n\t\t#run the kmeans with drought types as intiilization conditions (centroids) for the clusters\r\n\t\t\r\n\t\t#these are all of the huc 4 basins in the study area \r\n\t\thuc4s = ['1708','1801','1710','1711','1709','1701','1702','1705','1703','1601','1707','1706','1712','1704']\r\n\t\ts_output = []\r\n\t\td_output = []\r\n\t\tfor huc4 in huc4s: \r\n\t\t\thuc4_s = sd.prep_clusters(snotel_drought,huc4,huc_col=huc_col) #get the subset of the snow drought data for a given huc4\r\n\t\t\thuc4_d = sd.prep_clusters(daymet_drought,huc4,huc_col=huc_col)\r\n\t\t\t#make the centroids that serve as the intialization for the kmeans clusters- these are like endmembers (ish)\r\n\t\t\ts_centroids = DefineClusterCenters(huc4_s,'WTEQ','PREC','TAVG').combine_centroids() #makes a numpy array with four centroids\r\n\t\t\td_centroids = DefineClusterCenters(huc4_d,'swe','prcp','tavg').combine_centroids() #makes a numpy array with four centroids\r\n\r\n\t\t\t#clusters should be like: {0:dry, 1:warm, 2:warm_dry, 3:no_drought} 6/8/2021 DOUBLE CHECK\r\n\t\t\t#run kmeans for the snotel data\r\n\t\t\ts_clusters = sd.run_kmeans(huc4_s[['WTEQ','PREC','TAVG']].to_numpy(),huc4_s['label'],s_centroids)\r\n\t\t\ts_clusters = sd.add_drought_cols_to_kmeans_output(s_clusters, huc_col=huc_col) #add a few cols needed for plotting \r\n\t\t\t#run kmeans for the daymet data \r\n\t\t\td_clusters = sd.run_kmeans(huc4_d[['swe','prcp','tavg']].to_numpy(),huc4_d['label'],d_centroids)\r\n\t\t\td_clusters = sd.add_drought_cols_to_kmeans_output(d_clusters, huc_col=huc_col) #add a few cols needed for plotting \r\n\r\n\t\t\ts_output.append(s_clusters)\r\n\t\t\td_output.append(d_clusters)\r\n\t\ts_plot = pd.concat(s_output)\r\n\r\n\t\t#select the cols of interest and rename so there's no 
confusion when dfs are merged \r\n\t\ts_plot=s_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\ts_plot.columns=[huc_col,'year']+['s_'+column for column in s_plot.columns if not (column == huc_col) | (column=='year')]\r\n\r\n\t\td_plot = pd.concat(d_output)\r\n\t\td_plot=d_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\td_plot.columns=[huc_col,'year']+['d_'+column for column in d_plot.columns if not (column == huc_col) | (column=='year')]\r\n\t\r\n\t\t#merge the two datasets into one df \r\n\t\tdfs = s_plot.merge(d_plot,on=[huc_col,'year'],how='inner')\r\n\t\t\r\n\t\t#deal with the scenario that there are basins with less than 30 years of data, remove those here\r\n\t\tdfs = sd.remove_short_dataset_stations(dfs,huc_col)\r\n\t\tperiod_list.append(dfs)\r\n\r\n\tplot_counts(period_list,kwargs.get('stats_dir'),huc_col=huc_col,**kwargs)", "def get_outdoor_data(temp_dir,site):\n if site == 'berk':\n files_od = glob(join(temp_dir,'outdoor','20*.xlsx'))\n elif site == 'bus':\n files_od = glob(join(temp_dir,'outdoor','Busara*.csv'))\n else:\n raise NameError(site)\n\n dfs = []\n for f in files_od:\n if site == 'berk':\n this_df = pd.read_excel(f,sheet_name=0,usecols='B:D',index_col=0,parse_dates=True, header=1)\n elif site == 'bus':\n this_df = pd.read_csv(f,usecols=[0,1,2],index_col=0,parse_dates=True,header=2)\n \n # drop missing values that prevented conversion to float type\n if this_df.iloc[:,0].dtype != np.float64:\n this_df = this_df[this_df.iloc[:,0] != ' ']\n this_df = this_df.astype(np.float64)\n\n # correct for weird timezones in berkeley datalogger\n this_df = correct_tz(this_df,site)\n \n this_df.columns = ['T','RH']\n this_df.index.name = 'time'\n\n # convert to celsius\n this_df['T'] = (this_df['T'] - 32) * 5/9\n dfs.append(this_df)\n \n df_od = pd.concat(dfs)\n\n # drop duplicated measurements\n df_od = df_od[~df_od.index.duplicated(keep='last')].sort_index()\n \n # separate out into daily min,mean,max\n groups = df_od.groupby(df_od.index.date)\n dfs_od = {'all':df_od,\n 'min': groups.min(),\n 'mean': groups.mean(),\n 'max': groups.max()}\n \n for i in ['min','mean','max']:\n # remove first and last day to ignore days where we did not get full recording\n dfs_od[i] = dfs_od[i].iloc[1:-1,:]\n \n # name index so that we can merge onto multiIndex'd dataframe\n dfs_od[i].index.name = 'date'\n \n return dfs_od", "def extract_archive_data():\n extract_from_db_info = [\n {\n 'source_db': 'ecommerce_db',\n 'dest_db': 'ecommerce_db',\n 'source_table': 'raw_customer',\n 'dest_table': 'raw_customer_archive',\n 'sql_select': None,\n 'sql_insert': '../sql/insert/insert_raw_customer_archive.sql'\n },\n {\n 'source_db': 'ecommerce_db',\n 'dest_db': 'ecommerce_db',\n 'source_table': 'raw_product',\n 'dest_table': 'raw_product_archive',\n 'sql_select': None,\n 'sql_insert': '../sql/insert/insert_raw_product_archive.sql'\n },\n {\n 'source_db': 'ecommerce_db',\n 'dest_db': 'ecommerce_db',\n 'source_table': 'raw_sales',\n 'dest_table': 'raw_sales_archive',\n 'sql_select': None,\n 'sql_insert': '../sql/insert/insert_raw_sales_archive.sql'\n }\n ]\n\n for extract_info in extract_from_db_info:\n try:\n extract_data_from_db(extract_info['source_db'], extract_info['dest_db'], extract_info['dest_table'], extract_info['sql_select'], extract_info['sql_insert'])\n except Exception as e:\n print(\"An error occurred: \", e)\n else:\n print(\"Successfully inserted records in {} table of {} database from {} table of {} database.\".format(extract_info['dest_table'], extract_info['dest_db'], 
extract_info['source_table'], extract_info['source_db']))", "def import_stations(time_res='hourly',time_format='%Y%m%d%H',\r\n campaign_time=[datetime(2018,12,9), datetime(2018,12,12)],\r\n data_category='air_temperature', station_ids=['00044','00091'],\r\n dbase_dir='dbase', table_dir='tables',Output=True,\r\n memory_save=True):\r\n timeranges=['recent','historical']\r\n #%%load the datasets available at each timestep\r\n dwd_datasets_meta=dwd_datasets_meta=json.load(open(table_dir+\"\\\\dwd_station_meta.txt\"))\r\n #try to get a variable from the category, otherwise use interpolation of higher frequency data\r\n resample_frequency=None\r\n time_res_dbase=time_res\r\n try:\r\n dwd_datasets_meta[time_res][data_category]\r\n except Exception:\r\n if time_res=='daily':\r\n try:\r\n dwd_datasets_meta['hourly'][data_category]\r\n print(data_category,' is not provided at the required resolution, daily_mean of hourly data used instead')\r\n resample_frequency='D'\r\n time_res_dbase='hourly'\r\n except Exception:\r\n try: \r\n dwd_datasets_meta['10_minutes'][data_category]\r\n print(data_category,' is not provided at the required resolution, daily_mean of 10_minutes data used instead')\r\n resample_frequency='D'\r\n time_res_dbase='10_minutes'\r\n except Exception:\r\n print(data_category, 'not available')\r\n sys.exit(1)\r\n if time_res=='hourly':\r\n try: \r\n dwd_datasets_meta['10_minutes'][data_category]\r\n print(data_category,' is not provided at the required resolution, hourly_mean of 10_minutes data used instead')\r\n resample_frequency='H'\r\n time_res_dbase='10_minutes'\r\n except Exception:\r\n print(data_category, 'not available')\r\n sys.exit(1)\r\n \r\n \r\n #%% download from dwd if necessary\r\n #connect to server\r\n server='opendata.dwd.de'\r\n ftp=connect_ftp(server = server,connected = False)\r\n #get the mean time of the campaign\r\n date_mean=campaign_time[0]+(campaign_time[1]-campaign_time[0])/2 \r\n # load the inititial ds\r\n dbase_path=dbase_dir+'\\\\db_stations_'+time_res+'_'+data_category+'.nc'\r\n if os.path.exists(dbase_path):\r\n with xr.open_dataset(dbase_path) as dwd_dbase:\r\n dwd_dbase.load()\r\n print('Existing database imported')\r\n #get the non_nans stations\r\n current_stations=np.array(dwd_dbase[list(dwd_dbase.keys())[0]].sel(time=date_mean,method='nearest').dropna('STATIONS_ID').coords['STATIONS_ID'])\r\n else:\r\n print(dbase_path, 'does not exist, we create a new netcdf_file')\r\n dwd_dbase=xr.Dataset()\r\n current_stations=np.array((-9999)).reshape(1)\r\n #change directory on server\r\n for timerange in timeranges:\r\n archive_url='/climate_environment/CDC/observations_germany/climate/'+time_res_dbase+'/'+data_category+'/'+timerange \r\n ftp.cwd(archive_url)\r\n #get the archive\r\n for station_id in station_ids:\r\n #we check whether the station is in the database with this parameter already\r\n if int(station_id) in current_stations:\r\n print('Station', station_id, 'with category', data_category,'in ',timerange,'dbase already')\r\n continue\r\n try:\r\n archive_name=[s for s in ftp.nlst() if station_id in s][0]\r\n except:\r\n print('No ',timerange,'data for station',station_id)\r\n continue\r\n print('Retrieving {}...'.format(archive_name))\r\n retrieved = False\r\n archive = io.BytesIO()\r\n # try to retrieve file\r\n while not retrieved:\r\n try:\r\n ftp.retrbinary(\"RETR \" + archive_name, archive.write)\r\n retrieved = True\r\n except:\r\n ftp=connect_ftp(server = server,connected = False)\r\n ftp.cwd(archive_url)\r\n archive.seek(0)\r\n with 
ZipFile(archive) as myzip:\r\n for f in myzip.infolist():\r\n # This is the data file\r\n #print('zip content:', f.filename)\r\n if f.filename.startswith('produkt_'):\r\n product = io.StringIO(str(myzip.read(f.filename),'utf-8'))\r\n #get dataframe from product \r\n dwd_product=pd.read_csv(product,sep=';',skipinitialspace=True)\r\n #get datetime\r\n dwd_product['time']=pd.to_datetime(dwd_product['MESS_DATUM'],format=time_format) \r\n dwd_product=dwd_product.rename(columns=dwd_datasets_meta[time_res_dbase][data_category])\r\n dwd_product=dwd_product.reset_index()\r\n dwd_product=dwd_product.set_index(['time','STATIONS_ID'])\r\n dwd_product=dwd_product.drop(columns=['MESS_DATUM','quality_level_of_next_columns','end_of_record','index'])\r\n #append to database\r\n dwd_xr=dwd_product.to_xarray()\r\n #replace all values equal to -999 to nan\r\n for data_var in dwd_xr.data_vars:\r\n dwd_xr[data_var]=dwd_xr[data_var].where(dwd_xr[data_var]>-999)\r\n if station_id=='05009':\r\n print('ok') \r\n #only add relevant dates if available memoryis rather small\r\n \r\n if memory_save and timerange=='historical':\r\n dwd_xr=dwd_xr.sel(time=slice(campaign_time[0]-timedelta(days=1),campaign_time[1]+timedelta(days=1)))\r\n #dwd_xr=dwd_xr.squeeze()\r\n \r\n try:\r\n dwd_dbase=xr.merge([dwd_dbase,dwd_xr])\r\n except Exception as e:\r\n print(e)\r\n print('try merging with compat=override')\r\n dwd_dbase=xr.merge([dwd_dbase,dwd_xr],compat='override')\r\n print(archive_name,' added to database')\r\n #upscale to required temporal resolution\r\n if resample_frequency is not None:\r\n dwd_dbase=dwd_dbase.resample(time=resample_frequency).mean(skipna=True)\r\n print('DWD data upscaled to',time_res,'averages')\r\n if Output==True:\r\n dwd_dbase.to_netcdf(dbase_path)\r\n print('Updated database' ,dbase_path)\r\n return dwd_dbase", "def compute_aggregate_weather_data():\n\n # get a list of all the csv files names in the 'weather_data' directory\n files = get_all_csv_files_in_directory('weather_data')\n\n # Todo: if the number of csv files doesn't match the expected value, unzip remaining using the 'os' module\n\n if len(files) == 0:\n\n # Unzip all files in current directory and subdirectories\n print \"unzipping weather files...\"\n os.system(\"unzip 'weather_data/*.zip' -d weather_data\")\n\n\n # Try again to get files\n files = get_all_csv_files_in_directory('weather_data')\n\n # Throw exception if still missing csv files\n if len(files) == 0:\n raise ValueError(\"Missing weather data in csv format in the 'weather_data' directory\")\n\n # convert the list of csv file names to a list of corresponding DataFrames\n dallas_files = filter(lambda file_name : \"KDAL\" in file_name, files)\n houston_files = filter(lambda file_name : \"KHOU\" in file_name, files)\n san_antonio_files = filter(lambda file_name : \"KSAT\" in file_name, files)\n\n print \"Retrieved weather data files...\"\n print \"\\t# of Dallas weather files found: \", len(dallas_files)\n print \"\\t# of Houston weather files found: \", len(houston_files)\n print \"\\t# of San Antonio weather files found: \", len(san_antonio_files)\n\n dallas_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), dallas_files)\n houston_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), houston_files)\n san_antonio_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), san_antonio_files)\n\n dallas_df = pd.concat(dallas_dfs)\n houston_df = pd.concat(houston_dfs)\n 
san_antonio_df = pd.concat(san_antonio_dfs)\n\n print \"Aggregating all of the weather data...\"\n # fold the list of data frames into a single data frame\n aggregate_df = reduce(lambda df1, df2: pd.merge(df1, df2, on=\"Date\", how=\"outer\"), [dallas_df, houston_df, san_antonio_df]).sort_values(\"Date\")\n\n return aggregate_df", "def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations", "def gatherStationData():\n flist = list_files()\n station_dics = {}\n print(\"Reading in csv data...\")\n for f_in in flist:\n start,end = find_timespan(f_in)\n station = station_name(f=f_in)\n print(\"File: {0} Station: {1} {2}--{3}\".format(f_in, \n station, start, end))\n station_dics[station] = read_precip(fname=f_in, \n label=station, start_year=start, end_year=end)\n data_list = []\n for s in station_dics:\n data_list.append(station_dics[s]) \n return pd.concat(data_list,axis=1)", "def run():\n #Initialise variables\n data = build_station_list()\n update_water_levels(data)\n ls = []\n ID = []\n \n #Number of days in past taken data from\n dt = 7\n #How many graphs per window\n limit = 4\n #How many stations\n number = 6\n \n #Create list of measuring_id's sorted by water level\n for station in data:\n if station.typical_range_consistent() == True and station.relative_water_level() != None:\n ls.append((station, station.relative_water_level()))\n\n ls = sorted_by_key(ls, 1)\n \n for station in ls:\n ID.append(station[0])\n \n s = count_inconsistent_sets(ID[:number], dt)\n \n ID = ID[:number+s]\n\n plot_water_levels(ID, dt, limit, s)", "def import_temp_data(counties):\n for index, row in counties.iterrows():\n station = row[2]\n url = f'https://wrcc.dri.edu/WRCCWrappers.py?sodxtrmts+0{station}+por+por+maxt+none+mave+5+01+F'\n result = requests.get(url)\n soup = BeautifulSoup(result.text, 'html.parser')\n table = soup.find('table')\n data = pd.read_html(str(table))\n df = data[0]\n df.columns = df.iloc[0]\n df = df.drop([0])\n df = df.iloc[-65:-8, :]\n df = df.rename(columns={'YEAR(S)': 'Year'})\n df['Year'] = pd.to_datetime(df['Year'], format='%Y')\n df = df.set_index('Year')\n df = df.dropna(axis=1)\n df = df.replace(to_replace='-----', value=np.nan)\n df = df.astype('float64')\n df = df.fillna(df.mean().round(2))\n df = df.add_suffix('_t')\n name = row[0]\n df['County'] = name\n df.to_csv(f'{name}_avgmaxtemp.csv')\n print(f'Avg. max. temp. 
data from {name} saved')\n time.sleep(3.14)\n print('Done')", "def _get_data(self, gas, loc, voltage, speed, trial):\n cols = []\n for g in gas:\n for l in loc:\n try:\n (sub, files) = self._get_sensor_col_files(g, l)\n except OSError as e:\n print('{}\\n Keeping calm and carrying on.'.format(e))\n continue\n for v in voltage:\n for s in speed:\n end = \"_board_setPoint_%s_fan_setPoint_%s_mfc_setPoint_%sppm_p%s\" % (\n self.SensorVoltages[v],\n self.FanSpeeds[s],\n self.GasNames[g],\n self.AltLocs[l])\n filtered = [f.split('/')[-1] for f in files if f.endswith(end)]\n if not filtered:\n if self._args['verbose']:\n print('No valid files found for \"%s\", skipping!' % sub)\n continue\n timeStamp = [filt.split('_', 1)[0] for filt in filtered]\n date = [time.strptime(ts, '%Y%m%d%H%M') for ts in timeStamp]\n date = [time.strftime('%Y-%m-%d %H:%M', d) for d in date]\n filtered = [os.path.join(sub, f) for f in filtered]\n for i, filt in enumerate(filtered):\n j = i + 1\n if j in trial:\n p = os.path.sep.join([self.dataloc_prefix,\n self.data_location,\n filt])\n\n cols.append(SensorColumn(data_location=p,\n gas=self.GasNames[g],\n loc=self.Locs[l],\n voltage=self.SensorVoltages[v],\n speed=self.AltFanSpeeds[s],\n trial=j,\n _args=self._args))\n\n if self._args['verbose']:\n print('\\nSelected %i single trial SensorColumns!' % len(cols))\n return cols", "def populate_database(telescope_name, instrument_name):\n telescope = Telescope.objects.create(\n name=telescope_name, latitude=25.0, longitude=45.0)\n instrument = Instrument.objects.create(\n name=instrument_name, telescope=telescope)\n for year_int in (2012, 2013):\n for month_int in range(1, 13):\n for night_int in (1, monthrange(year_int, month_int)[1]):\n ut_date = date(year_int, month_int, night_int)\n night = Night.objects.create(\n ut_date=ut_date, instrument=instrument, observers='Smith')\n Exposure.objects.create(\n night=night, run_number=1, ut_start=time(10, 0, 0),\n exposed=20.0, ra=60.0, dec=30.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=2, ut_start=time(11, 0, 0),\n exposed=30.0, ra=90.0, dec=0.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=3, ut_start=time(12, 0, 0),\n exposed=40.0, ra=120.0, dec=-30.0, object_exp=False)", "def ingest():\n\n base_path = '/home/mnichol3/Coding/wx-scripts/wtlma'\n\n flash_files = ['flash-out-05232019-2050.txt',\n 'flash-out-05232019-2100.txt',\n 'flash-out-05232019-2110.txt',\n 'flash-out-05232019-2120.txt',\n 'flash-out-05232019-2130.txt',\n 'flash-out-05232019-2140.txt',\n 'flash-out-05232019-2150.txt']\n\n df_cols = ['start', 'end', 'duration', 'area', 'ctr_alt', 'ctr_lat', 'ctr_lon',\n 'tot_energy']\n\n flash_df = pd.read_csv(join(base_path, flash_files[0]), sep=',', names=df_cols)\n\n for f in flash_files[1:]:\n curr_path = join(base_path, f)\n curr_df = pd.read_csv(curr_path, sep=',', names=df_cols)\n flash_df = pd.concat([flash_df, curr_df], ignore_index=True)\n\n return flash_df", "def _setData(self):\n\n if not self.stationId:\n return\n \"\"\" \n # get the ressource url and adjust lat and lon from data portal\n query = sparqls.stationResource(self.stationId)\n key, val = RunSparql(query, 'array').run()\n if val: \n self.url = val[0][0]\n self.lat = float(val[0][2])\n self.lon = float(val[0][3])\n \"\"\"\n\n # it is possible, that a station id has multiple URI\n # ask for all URI\n query = sparqls.stationData(self.uri, 'all')\n data = RunSparql(query, 'pandas').run()\n\n if not data.empty:\n self._data = data\n else:\n self._data = 'no 
data available'\n\n # check if data is available and extract the 'unique' data products\n if isinstance(self._data, pd.DataFrame):\n p = self._data['specLabel'].unique()\n self._products = pd.DataFrame(p)\n\n # replace samplingheight=None with empty string\n self._data.samplingheight.replace(to_replace=[None], value=\"\", inplace=True)\n else:\n self._products = 'no data available'", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def test_seed_station_information(self):\n\t\tget_info.seed_station_information()\n\n\t\tMacDougal_Prince = db.session.query(Station).filter(Station.id == 128).one()\n\t\tself.assertTrue(MacDougal_Prince, 'Station at MacDogual/Pride did not get sucessfully added.')\n\n\t\tself.assertEqual(MacDougal_Prince.num_bikes_available, 0, 'Bike counts were not initialized properly')\n\t\tself.assertEqual(MacDougal_Prince.num_docks_available, 0, 'Dock counts were not initialized properly')", "def get_prepared_data(cls, ext_stations=None):\n ext_stations = ext_stations or StationDAO.get_all_with_prices()\n features = (cls.get_station_features(row) for row in ext_stations)\n classes = (cls.get_category(row) for row in ext_stations)\n return features, classes", "def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):\n city_dictionary = getCities(cities_file)\n actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())\n #For each day and for each city, get all the data and put it into a spreadsheet.", "def initialize_data(self , station = '', datasets = {} ): \n self.datasets = datasets\n self.datasets_keys = 
datasets.keys()\n self.station = station\n self.out_name = self.out_dir + '/' + self.station + '_CEUAS_premerged_v0.nc'\n\n self.observations_table_vars = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units', 'source_id']\n\n \"\"\" Loading the econding of the tables created from the harvester script and to be applied again \"\"\"\n self.encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n self.encodings['era5fb'] = np.load('era5fb_encodings_all.npy' , allow_pickle = True ).item() \n self.dic_type_attributes = np.load('dic_type_attributes.npy',allow_pickle= True).item()\n \n self.era5fb_columns = self.dic_type_attributes['era5fb'].keys()\n\n self.obstab_nans_filled = False \n\n data['cdm_tables'] = {} \n \n \"\"\" Loop over all the datasets \n k: name of the dataset \n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ] \"\"\" \n for k,v in self.datasets.items() :\n data[k] = {}\n for F in v:\n \n logging.info(' Dataset ::: *** %s %s ' , k , F ) \n \n data[k][F] = {}\n\n h5py_file = h5py.File(F, 'r')\n data[k][F]['h5py_file'] = h5py_file \n \n a = h5py_file['recordtimestamp']\n \n data[k][F]['recordtimestamp'] = a\n data[k][F]['recordindex'] = h5py_file['recordindex']\n data[k][F]['dateindex'] = h5py_file['dateindex']\n a = h5py_file['recordtimestamp']\n data[k][F]['max_date'] = max(a)\n data[k][F]['min_date'] = min(a)\n \n data[k][F]['counter'] = 0\n\n #######\n # HEADER TABLE\n #######\n head_tab = h5py_file['header_table']\n logging.info('*** header_table')\n data[k][F]['header_table'] = {}\n for var in head_tab.keys():\n if ('string' in var or 'hdrlen' in var): continue\n try: \n data[k][F]['header_table'][var] = (np.array(head_tab[var][:])).astype(self.dic_type_attributes['header_table'][var]['type'] )\n except:\n print('failed convertion type header' , k , ' ' , F , ' ' , var )\n \n ####### \n # STATION CONFIGURATION\n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'station_configuration' , decode_times = False )\n data[k][F]['station_configuration'] = d.to_dataframe()\n logging.debug('Done with %s station_configuration' , str(k) )\n d.close()\n\n ####### \n # SOURCE CONFIGURATION \n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'source_configuration' , decode_times = False )\n data[k][F]['source_configuration'] = d\n logging.debug('Done with %s source_configuration' , str(k) )\n d.close()\n\n\n data['cdm_tables'] = {}\n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\"\n for t in self.standard_cdm: # [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n if t not in data['cdm_tables'].keys():\n #data['cdm_tables'][t] = ''\n cdm = xr.open_dataset(F , engine = 'h5netcdf' , group = t )\n data['cdm_tables'][t] = cdm \n\n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n\n self.data = data\n\n \"\"\" Making all date_times \"\"\"\n self.make_all_datetime()", "def Find_nearest_dwd_stations(inpt_data,\r\n date_start='20051201',\r\n date_end='20201231',\r\n dwd_time_format='%Y%m%d%H',\r\n data_category='air_temperature',\r\n temp_resolution='hourly',\r\n no_of_nearest_stations=4,\r\n memory_save=True,\r\n Output='True'):\r\n if isinstance(data_category,list):\r\n if len(list(data_category)) > 1:\r\n print(\r\n 'Currently only one 
dwd category allowed, please run function multiple times for each category'\r\n )\r\n return None\r\n \r\n #convert time to datetime\r\n dt_start=datetime.strptime(date_start,'%Y%m%d')\r\n dt_end=datetime.strptime(date_end,'%Y%m%d')\r\n print('Start quering data from DWD')\r\n #define the database folder\r\n pypath = os.path.dirname(os.path.abspath(__file__))\r\n table_dir = pypath + '\\\\' + 'tables'\r\n dbase_dir = pypath + '\\\\' + 'dbase' \r\n #%% we check all available stations and create a valid list\r\n filename_stations=update_stationlist(time_res='hourly',dbase_dir=table_dir)\r\n stations_all=pd.read_csv(filename_stations, dtype={'STATIONS_ID': object})\r\n # delete all stations which do not cover the category\r\n dwd_stations=stations_all[stations_all[data_category]==True].copy()\r\n #correct to datetime\r\n dwd_stations['date_end']=pd.to_datetime(stations_all.date_end,format='%Y%m%d')\r\n dwd_stations['date_start']=pd.to_datetime(stations_all.date_start,format='%Y%m%d')\r\n # clean to stations which cover the campaign time #dt_low <= dt <= dt_high:\r\n dwd_stations=dwd_stations[(dwd_stations.date_start<=dt_start) & (dwd_stations.date_end>=dt_end)]\r\n #make a geodataframe out of it\r\n dwd_stations=gpd.GeoDataFrame(dwd_stations,geometry=gpd.points_from_xy(dwd_stations.geo_lon, dwd_stations.geo_lat))\r\n \r\n #loop through all rows to get the n closest points\r\n distances=pd.DataFrame()\r\n for _, station in dwd_stations.iterrows():\r\n distances[station.STATIONS_ID]=inpt_data.distance(station.geometry)\r\n \r\n #%% get the n stations with smallest distance and update database\r\n id_nearest_stations=distances.apply(lambda s: s.nsmallest(no_of_nearest_stations).index.tolist(), axis=1).values.tolist() #station ids\r\n #get them as unique values by sum a list of lists https://bit.ly/353iZQB\r\n id_dwd_stations=list(set(sum(id_nearest_stations,[])))\r\n \r\n #update the database\r\n db_dwd_stations=import_stations(time_res=temp_resolution,time_format=dwd_time_format,campaign_time=[dt_start,dt_end],data_category=data_category,station_ids=id_dwd_stations,dbase_dir=dbase_dir,Output=Output,table_dir=table_dir,memory_save=memory_save)\r\n \r\n #distance of nearest stattions\r\n dist_nearest_stations=pd.DataFrame(np.sort(distances.values)[:,:no_of_nearest_stations]).values.tolist() #distances themself\r\n #create new columns in the input data\r\n station_col_nm=list()\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_station_'+str(i))\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_distance_'+str(i))\r\n #create new dataframe\r\n distance_data=pd.concat([pd.DataFrame(id_nearest_stations).astype(int),pd.DataFrame(dist_nearest_stations)],axis=1)\r\n distance_data.columns=station_col_nm\r\n #add to main dataset\r\n inpt_data=pd.concat([inpt_data, distance_data],axis=1) \r\n \r\n return inpt_data,db_dwd_stations", "def generate_training_testing_dataset(store_id, transactions, meteo_day, max_days=2500,\n single_barcode=0):\n\n # Get the minimum and maximum of date in the transactions\n min_date = transactions[(transactions['STO_EAN'] == store_id)].min()['TRX_DATETIME'].date()\n max_date = transactions[(transactions['STO_EAN'] == store_id)].max()['TRX_DATETIME'].date()\n\n # Get the number of days between the two date\n num_days = (max_date - min_date).days\n\n # Get the list of unique products barcode in the transactions\n products_barcode = transactions['BARCODE'].unique()\n\n # Only do one single barcode if 
activated\n if single_barcode is not None:\n products_barcode = [products_barcode[single_barcode]]\n\n\n # Array to contain all training data\n all_data_first_level = []\n\n # For each day and for each product\n for day in xrange(num_days):\n\n print(day)\n\n # If we have already considered more days than allowed, stop\n if day > max_days:\n break\n\n\n # Get the date corresponding to this day\n date = min_date + pd.DateOffset(day)\n # Get the weather of the date\n weather = get_weather_on_date(date, meteo_day, store_id).head(n=1)\n\n # If the weather is empty we skip this day\n if weather.empty:\n continue\n\n # For each product to include\n for product_barcode in products_barcode:\n\n # Get the volume and inventory data\n volume = get_volume_product_on_date(product_barcode, date, store_id, transactions)\n\n # If no volume could be found skip this date,product pair\n if volume is None:\n continue\n\n # Get the type of the current date\n day_type = generate_day_type(date)\n\n\n # Generating complex features based on the simpler one\n\n # This contains respectively yesterday, the day before yesterday and the same day as current one in\n # previous week\n yesterday = date - pd.DateOffset(1)\n two_days_ago = date - pd.DateOffset(2)\n one_week_ago = date - pd.DateOffset(7)\n\n # Get the day type of yesterday and 2 days ago\n day_type_yesterday = generate_day_type(yesterday)\n day_type_2days_ago = generate_day_type(two_days_ago)\n\n # Get the volume of yesterday, 2days ago and 1 week ago\n volume_yesterday = get_volume_product_on_date(product_barcode, yesterday, store_id, transactions)\n volume_2days_ago = get_volume_product_on_date(product_barcode, two_days_ago, store_id, transactions)\n volume_one_week_ago = get_volume_product_on_date(product_barcode, one_week_ago, store_id, transactions)\n\n\n # Get the total sales and the total weight of product done yesterday, 2 days ago and 1 week ago\n volume_price_yesterday = 0\n volume_weight_yesterday = 0\n if volume_yesterday is not None:\n volume_price_yesterday = volume_yesterday[\"price\"]\n volume_weight_yesterday = volume_yesterday[\"weight\"]\n\n volume_price_2days_ago = 0\n volume_weight_2days_ago = 0\n if volume_2days_ago is not None:\n volume_price_2days_ago = volume_2days_ago[\"price\"]\n volume_weight_2days_ago = volume_2days_ago[\"weight\"]\n\n volume_price_one_week_ago = 0\n volume_weight_one_week_ago = 0\n if volume_one_week_ago is not None:\n volume_price_one_week_ago = volume_one_week_ago[\"price\"]\n volume_weight_one_week_ago = volume_one_week_ago[\"weight\"]\n\n\n\n # Using historical weather data\n weather_yesterday = get_weather_on_date(yesterday, meteo_day, store_id).head(n=1)\n temperature_min_yesterday = 0\n temperature_max_yesterday = 0\n if not weather_yesterday.empty:\n temperature_min_yesterday = weather_yesterday['TEMPERATURE_VALUE_MIN'].values[0]\n temperature_max_yesterday = weather_yesterday['TEMPERATURE_VALUE_MIN'].values[0]\n\n\n #tmp = [weather['TEMPERATURE_VALUE_MIN'].values[0], weather['TEMPERATURE_VALUE_MAX'].values[0],\n # weather['PRECIPITATION_VALUE'].values[0], weather['SUNSHINE_DURATION'].values[0],\n # weather['SNOW_DEPTH'].values[0], day_type, volume[\"price\"], volume[\"weight\"]]\n\n\n # Saving Features\n tmp = [weather['TEMPERATURE_VALUE_MIN'].values[0], weather['TEMPERATURE_VALUE_MAX'].values[0],\n day_type, volume[\"price\"], volume_price_yesterday,volume_weight_yesterday,\n volume_price_2days_ago, volume_weight_2days_ago,\n volume_price_one_week_ago, volume_weight_one_week_ago, 
temperature_min_yesterday,\n temperature_max_yesterday,day_type_yesterday, day_type_2days_ago,\n volume[\"weight\"]]\n\n all_data_first_level.append(tmp)\n\n return all_data_first_level", "def _populate_output(self):\n self._store_atomic_queries_table()\n self._store_composite_queries_table()", "def run(self):\n\t\tdf_iter = self.file_to_df(50000)\n\t\tdf_airport = self.airport_file_to_df()\n\t\tfor df in df_iter: # type: pd.DataFrame\n\t\t\tdf.drop_duplicates(inplace=True)\n\t\t\tdf = self.transform(df, df_airport)\n\n\t\t\tdf_result = self.get_only_new_records(\n\t\t\t\tdf=df,\n\t\t\t\tdf_columns=self.join_columns,\n\t\t\t\ttable_columns=self.join_columns\n\t\t\t)\n\n\t\t\tif len(df_result) > 0:\n\t\t\t\t# df_result.drop(self.table_columns, axis=1)\n\n\t\t\t\tself.save(\n\t\t\t\t\tdf=df_result,\n\t\t\t\t\ttable_name=\"travel_dimension\",\n\t\t\t\t\tdf_columns=self.table_columns,\n\t\t\t\t\ttable_colums=self.table_columns\n\t\t\t\t)", "def padova_interpolated_isomake(directories, bands_dict, output_filename,\n bands_ordered=None):\n\n if isinstance(directories, basestring):\n directories = [directories]\n\n if bands_ordered is None:\n bands_ordered = bands_dict.values()\n\n output_obj = open(output_filename, \"w\")\n\n header_string = \"#\\t[M/H]\\tMi\\tlogAge\\tlogTe\\tlogg\\tJacobian\"\n for band in bands_ordered:\n header_string += \"\\t{}\".format(band)\n header_string += \"\\tinner_count\\touter_count\\n\"\n output_obj.write(header_string)\n\n iso_metal_dict = {}\n bands_metal_dicts = {}\n for band in bands_dict.keys():\n bands_metal_dicts[band] = {}\n\n # instead do this on band-by-band basis? *******************\n\n for direc in directories:\n iso_files_gz = gb.glob(\"{}/*.dat.gz\".format(direc.rstrip(\"/\")))\n iso_files = gb.glob(\"{}/*.dat\".format(direc.rstrip(\"/\")))\n\n # check for metallicity of each file\n # and check which bands it has\n\n for iso_file1 in iso_files_gz:\n metal = None\n iso_data = gz.open(\"{0}\".format(iso_file1))\n for line in iso_data:\n split_line = line.split()\n if \"[M/H]\" in split_line:\n metal = float(split_line[split_line.index(\"[M/H]\")+2])\n if \"M_ini\" in split_line:\n for band in bands_metal_dicts.keys():\n if band in split_line:\n bands_metal_dicts[band][metal] = iso_file1\n\n for iso_file1 in iso_files:\n metal = None\n iso_data = open(\"{0}\".format(iso_file1), \"r\")\n for line in iso_data:\n split_line = line.split()\n if \"[M/H]\" in split_line:\n metal = float(split_line[split_line.index(\"[M/H]\")+2])\n if \"M_ini\" in split_line:\n for band in bands_metal_dicts.keys():\n if band in split_line:\n bands_metal_dicts[band][metal] = iso_file1\n\n for metal in bands_metal_dicts[bands_metal_dicts.keys()[0]]:\n filenames = []\n for band in bands_metal_dicts:\n if metal in bands_metal_dicts[band]:\n if bands_metal_dicts[band][metal] not in filenames:\n filenames.append(bands_metal_dicts[band][metal])\n else:\n break\n else:\n iso_metal_dict[metal] = filenames\n\n print(iso_metal_dict)\n keys = iso_metal_dict.keys()\n keys.sort()\n\n if len(keys) > 2:\n # iso_metal_weights=dict(zip(keys, np.gradient(np.array(keys)) ) )\n # in numpy 1.9.0 gradient has changed to use second order behaviour\n # at boundaries which gives wrong results in this context\n iso_metal_weights = dict(zip(keys,\n replacement_gradient(np.array(keys))))\n else:\n iso_metal_weights = dict(zip(keys, np.ones(len(keys))))\n print(\"metals and weights: \", iso_metal_weights)\n\n# interp in metallicity order\n\n for key in keys:\n iso_interp(iso_metal_dict[key], key, 
iso_metal_weights[key],\n output_obj, bands_dict, bands_ordered)\n\n output_obj.close()", "def ddf_parser():\n num_available, total = 0, 0\n indicator_twn_tuples = list() # format of a single tuple: (indicator_name, #twn rows, earliest available year)\n concept_metadata = dict() # {top_tag: second_layer_tag:\n\n # parse all ddf files provided by GapMinder and find how many of them with Taiwan statistics\n for f_path in glob.glob(os.path.join('statistics', '*datapoints*.csv')):\n total += 1\n df = pd.read_csv(f_path)\n if 'twn' in df.geo.unique():\n num_available += 1\n indicator = f_path.replace('statistics/ddf--datapoints--', '').replace('--by--geo--time.csv', '')\n # print('[Indicator]', indicator)\n print(f\"\\t{len(df[df.geo == 'twn'])} indicators including Taiwan statistics.\")\n\n # stat_name = df.columns[-1]\n # df_p = df.pivot(index='geo', columns='time')[stat_name]\n # df_p.insert(loc=0, column='indicator', value=stat_name)\n # df_p.to_csv(f'statistics_transformed/{stat_name}.csv', sep=';')\n\n indicators.append(indicator)\n\n\n # print(\"{:.1f}% datapoints have Taiwan statistics\".format(num_available / float(total) * 100))\n\n\n\n df_c = pd.read_csv(CONCEPT_CSV_PATH)\n df_t = pd.read_csv(TAG_CSV_PATH)\n df = pd.merge(df_c, df_t, how='left', left_on='tags', right_on='tag')\n for idr, num_rows, earliest_year in indicator_twn_tuples:\n ancestors = list()\n\n row_values = df[df['concept'] == idr].values[0]\n name_catalog, parent, ancestor = (row_values[i] for i in [9, 17, 18])\n if type(parent) is str:\n ancestors.append(parent)\n\n # get ancestors recursively\n while type(ancestor) is str:\n tag_row_values = df_t[df_t['tag'] == ancestor].values[0]\n ancestors.append(tag_row_values[1])\n ancestor = tag_row_values[2]\n\n # build concept structure\n ancestors.insert(0, name_catalog)\n print('/'.join(ancestors[::-1]))", "def create_station_dics(data_directories):\n \n files_all = {} \n for k,v in data_directories.items() :\n files = os.listdir(v)\n \n for f in files:\n station = f.split('_')[0] \n if station not in files_all.keys():\n files_all[station] = {}\n \n if k == 'ncar': # separating ncar temperature and wind files \n if 'trhc' in f:\n k = 'ncar_t'\n elif 'windc' in f:\n k = 'ncar_w'\n files_all[station][k] = ''\n files_all[station][k] = v + '/' + f # compelte path to the netCDF file \n\n #print('check') \n \n \n return files_all", "def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m", "def write_stations_to_metro_stations_table(config, tables_cache):\r\n database = deepcopy(config[\"database\"])\r\n metro_stations_table = deepcopy(config[\"tables\"][\"metro_stations_table\"])\r\n 
in_tests.test_write_to_database_from_dict(\r\n database, metro_stations_table, tables_cache)\r\n\r\n station_id = tables_cache[\"address_metro_stations_station_id\"]\r\n station_name = tables_cache[\"address_metro_stations_station_name\"]\r\n line_name = tables_cache[\"address_metro_stations_line_name\"]\r\n station_lat = tables_cache[\"address_metro_stations_lat\"]\r\n station_lng = tables_cache[\"address_metro_stations_lng\"]\r\n\r\n if station_id:\r\n write_to_database(database, metro_stations_table, {\r\n \"station_id\": station_id,\r\n \"station_name\": station_name,\r\n \"line_name\": line_name,\r\n \"station_lat\": station_lat,\r\n \"station_lng\": station_lng\r\n })\r\n return ()", "def load_data(city, month, week_day):\n# name for day variable changed from \"day_name\" into week_day to take into account new pandas method \".day_name()\"\n# read in file form selected city\n df = pd.read_csv(CITY_DATA[city])\n# create additional columns for months, days, start/ end times, hours and station combinations\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month_start'] = df['Start Time'].dt.month\n df['month_end'] = df['End Time'].dt.month\n df['day_start'] = df['Start Time'].dt.day_name()\n df['day_end'] = df['End Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n df['station_comb'] = df['Start Station'] + ' &AND& ' + df['End Station']\n# filter data file by month: capture start and end months\n if month != 7:\n df1 = df[df['month_start'] == month]\n df2 = df1.append(df[df['month_end'] == month])\n df = df2.drop_duplicates()\n# filter data file by day: capture start and end days\n if week_day != 'All':\n df3 = df[df['day_start'] == week_day]\n df4 = df3.append(df[df['day_end'] == week_day])\n df = df4.drop_duplicates()\n# reset index to facilitate looping in station_stats function\n df = df.reset_index()\n# check if user wants to check first data lines\n req_check_df = input('\\nIf you want to check the selected data please enter y.')\n if req_check_df[0:1].lower() == 'y':\n print('check df = \\n', df.head())\n wait = input('Press Enter to continue. 
')\n\n return df", "def load_vals_berkeley(s):\n \n download_dates = s.berk.download_dates\n download_dates_oth = s.berk.download_dates_oth\n sensor_swap_date = s.berk.sensor_swap_date\n \n dfs = {'indoor':{}}\n \n # outdoor temp\n dfs['outdoor'] = get_outdoor_data(s.berk.temps_dir,'berk')\n \n # base directory for indoor temp measurements\n indoor_temp_dir = join(s.berk.temps_dir,'indoor')\n\n print(\"Downloading temps: {}...\".format(download_dates[0]))\n \n # grab room temp data from both control and treatment rooms\n for gi,g in enumerate(['control','treatment']):\n this_df = dfs['indoor'][g] = {}\n \n ## download data from early in experiment when we were using different sensors\n this_df['partition'] = pd.read_excel(join(indoor_temp_dir,download_dates[0].strftime('%Y%m%d'),'{}_p.xls'.format(g)),\n sheet_name='Records',parse_dates=True,index_col=0).loc[:pd.to_datetime(sensor_swap_date),:]\n \n this_df['RA'] = pd.read_excel(join(indoor_temp_dir,download_dates[0].strftime('%Y%m%d'),'{}_RA.xls'.format(g)),\n sheet_name='Data Table',parse_dates=True,index_col=1,header=21).iloc[:,1].loc[:pd.to_datetime(sensor_swap_date)]\n this_df['RA'].name = 'T'\n this_df['RA'] = pd.DataFrame(this_df['RA'])\n this_df['RA']['RH'] = np.nan\n \n for loc in ['partition','RA']:\n this_df[loc].columns = ['T','RH']\n this_df[loc].index.name='time'\n \n ## now download data from sensors we switched to\n for d in download_dates[1:]:\n csv_dir = join(indoor_temp_dir,d.strftime('%Y%m%d'),'csvs')\n print(\"Downloading temps: {}...\".format(d))\n for loc in [('partition','p'),('RA','RA')]:\n fpath = join(csv_dir,'{}_{}.csv'.format(g,loc[1]))\n this_df = add_file_to_dfs(fpath, this_df, [1,2,3], ['T','RH'], loc[0], sensor_swap = sensor_swap_date)\n \n ## add individual temp/RH \n for s_ix in range(1,7):\n if isfile(join(csv_dir,'{}_{}.csv'.format(g,s_ix))):\n fpath = join(csv_dir,'{}_{}.csv'.format(g,s_ix))\n this_df = add_file_to_dfs(fpath, this_df, [1,2,3], ['T','RH'], str(s_ix))\n \n ## add operative temp\n if isfile(join(csv_dir,'{}_ot.csv'.format(g))):\n fpath = join(csv_dir, '{}_ot.csv'.format(g))\n this_df = add_file_to_dfs(fpath, this_df, [1,4], ['Top'], 'Top')\n \n ## add CO2\n for d in download_dates_oth:\n print(\"Downloading co2: {}...\".format(d))\n csv_dir = join(s.berk.other_dir,d.strftime('%Y%m%d'))\n \n # pass when the file doesn't exist (aka when\n # the CO2 sensor's batteries died\n fpath = join(csv_dir,'{}_co2.csv'.format(g))\n if not isfile(fpath):\n continue\n \n # otherwise, parse\n this_df = add_file_to_dfs(fpath, this_df, [1,4], ['co2'], 'co2')\n \n this_df = drop_duplicates_and_flags(this_df)\n \n return dfs", "def fill_dataset(self):\n rm, rstd = self.get_rolling_stats()\n\n self.add_rolling_mean(rm)\n self.add_bollinger_bands(rstd)\n self.add_spy_info()\n self.add_beta_and_sharpe()\n self.add_stlouis_data()", "def load_vals_bus(s):\n \n download_dates = s.bus.download_dates\n \n dfs = {'indoor':{}}\n \n dfs['outdoor'] = get_outdoor_data(s.bus.temps_dir,'bus')\n\n # grab room temp data from both control and treatment rooms\n for gi,gx in enumerate([('Cool','control'),('Warm','treatment')]):\n ga = gx[0]\n g = gx[1]\n tx_lab = ga[0].upper()\n \n \n print('Downloading {} room data...'.format(g))\n\n this_df = dfs['indoor'][g] = {}\n \n ## now download data from sensors we switched to\n for dx,d in enumerate(download_dates):\n \n d_str = d.strftime('%Y%m%d')\n dirname = join(s.bus.temps_dir,'indoor','{}_{}'.format(d_str,ga))\n print('Downloading {}...'.format(d_str))\n \n for loc in 
[('far','F'),('near','N')]:\n print('Downloading {} sensor...'.format(loc[0]))\n \n # need to use glob because some files have two \".\"s and some have one\n fname = '{}_Temp_{}{}.*csv'.format(d_str,tx_lab,loc[1])\n fpath = join(dirname, fname)\n files = glob(fpath)\n fpath = files[0]\n \n # check for missing files\n if len(files) != 1:\n with warnings.catch_warnings():\n warnings.simplefilter('always')\n warnings.warn('Missing file: {}'.format(fname))\n continue\n this_df = add_file_to_dfs(fpath, this_df, [1,2,3], ['T','RH'], loc[0])\n \n ## add individual data\n for s_ix in range(1,7):\n fname = '{}_Temp_{}{}.csv'.format(d_str,tx_lab,s_ix)\n fpath = join(dirname, fname)\n this_df = add_file_to_dfs(fpath, this_df, [1,2,3], ['T','RH'], str(s_ix))\n \n ## add T_operative\n fname = '{}_PingPong_{}.csv'.format(d_str,ga)\n fpath = join(dirname, fname)\n this_df = add_file_to_dfs(fpath, this_df, [1,4], ['Top'], 'Top')\n \n ## add CO2\n fname = '{}_{}_co2.csv'.format(d_str,g)\n fpath = join(dirname, fname)\n this_df = add_file_to_dfs(fpath, this_df, [1,4], ['co2'], 'co2')\n\n this_df = drop_duplicates_and_flags(this_df)\n \n return dfs", "def get_all_station_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n num = demand_data[:, 0, -2, np.newaxis] # todo check meaning here, get quick and slow feature\n\n raw_data = np.concatenate((num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, columns=GENERAL_HEADER)\n\n file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n os.remove(file_path)\n csv_data.to_csv(file_path)\n pass", "def loadDBstation(dbfile,stationID,varname,timeinfo=None,filttype=None,cutoff=3600.0,output_meta=False):\r\n from netcdfio import queryNC\r\n \r\n outvar = ['NetCDF_Filename','NetCDF_GroupID','StationName']\r\n tablename = 'observations'\r\n #condition = 'Variable_Name = \"%s\" and StationID = \"%s\"' % (varname,stationID)\r\n condition = 'Variable_Name = \"%s\" and StationID LIKE \"%%%s\"' % (varname,stationID)\r\n \r\n print 'Querying database...'\r\n print condition\r\n data, query = queryNC(dbfile,outvar,tablename,condition) \r\n \r\n if len(data)==0:\r\n print '!!! Warning - Did not find any stations matching query. 
Returning -1 !!!'\r\n return -1\r\n else:\r\n ts = timeseries(data[0]['time'],data[0][varname].squeeze())\r\n \r\n \r\n if not timeinfo==None:\r\n print 'Interpolating station data between %s and %s\\n'%(timeinfo[0],timeinfo[1])\r\n tnew,ynew = ts.interp((timeinfo[0],timeinfo[1],timeinfo[2]))\r\n ts = timeseries(tnew,ynew)\r\n ts.dt = timeinfo[2] # This needs updating\r\n \r\n if not filttype==None:\r\n print '%s-pass filtering output data. Cutoff period = %f [s].'%(filttype,cutoff)\r\n yfilt = ts.filt(cutoff,btype=filttype,axis=-1)\r\n ts.y = yfilt.copy()\r\n \r\n if output_meta:\r\n if data[0].has_key('elevation'):\r\n ele = data[0]['elevation']\r\n else:\r\n ele = 0.0\r\n meta = {'longitude':data[0]['longitude'],'latitude':data[0]['latitude'],'elevation':ele,'StationName':query['StationName'][0]}\r\n return ts, meta \r\n else:\r\n return ts", "def dataLoader(stationDict, startDate, endDate):\n\n # Generate a URL\n url = ('https://waterservices.usgs.gov/nwis/dv/?format=json' +\n # Specify the sites to download\n '&sites=' + stationDict['DatasetExternalID'] +\n # Specify the start date\n '&startDT=' + datetime.strftime( startDate, '%Y-%m-%d' ) +\n #Specify the end data\n '&endDT=' + datetime.strftime( endDate, '%Y-%m-%d' ) +\n # Specify that we want streamflow\n '&parameterCd=00060' +\n # Specify that we want daily means\n '&statCd=00003' +\n # Allow all sites\n '&siteStatus=all' )\n \n # Get the data\n response = requests.get(url)\n\n # Check the status code\n if response.status_code != 200:\n return \n else:\n response = response.json()\n \n # Create a dataframe from the data\n df = pd.DataFrame(response['value']['timeSeries'][0]['values'][0]['value'])\n\n # Set the index to the dateTime index\n df.set_index(pd.DatetimeIndex(pd.to_datetime(df['dateTime'])), inplace = True)\n del df['dateTime'] # Delete the redundant column\n\n # Replace missing data with NaN's\n df['value'].replace(to_replace = '-999999', value = np.nan, inplace = True)\n\n # Convert to numeric\n df['value'] = pd.to_numeric(df['value'])\n \n # Remove any duplicate data in the dataset\n df = df[~df.index.duplicated(keep='last')] # Remove duplicates from the dataset\n df = df[~df.index.isnull()]\n\n # Rename the columns\n df.columns = ['USGS | ' + stationDict['DatasetExternalID'] + ' | Flag', 'USGS | ' + stationDict['DatasetExternalID'] + ' | Streamflow | CFS']\n del df['USGS | ' + stationDict['DatasetExternalID'] + ' | Flag']\n\n # Return the data frame\n return df", "def populate_stops(self):\n stops = self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def to_WTH_converter(self, weather_data, dest_dir):\n ds_all = weather_data.get_global_dataset()\n if self.country is None:\n print(\"Country given is erroneous:\")\n return\n elif self.country == \"globe\":\n lon_num_start = 0\n 
lon_num_stop = weather_data.get_num_of_attribute('longitude')\n lat_num_start = 0\n lat_num_stop = weather_data.get_num_of_attribute('latitude')\n else:\n lon_num_start, lon_num_stop, lat_num_start, lat_num_stop = weather_data.get_country_boundary(self.country)\n\n\n # top bottom, left to right\n lon_num_start = 397\n lat_num_start = 184\n for lon_i in range(lon_num_start, lon_num_stop + 1):\n # for lon_i in range(lon_num_start, lon_num_stop+1):\n lon = ds_all.longitude.isel(longitude=lon_i).values.tolist()\n\n for lat_i in range(lat_num_start, lat_num_stop+1):\n # for lat_i in range(lat_num_start, lat_num_stop + 1):\n lat = ds_all.latitude.isel(latitude=lat_i).values.tolist()\n\n # create a dynamic header with updated LON, LAT info and move it into the folder given\n wth_header_u = ut.format_header(lat_i + 1, lon_i + 1, lat, lon)\n wth_header = dest_dir + \"/\" + wth_header_u\n shutil.move(wth_header_u, wth_header)\n\n # open in appending mode\n fwth = open(wth_header, \"a+\")\n\n # loop through daily weather data\n for t, date in enumerate(self.years):\n daily_data_vars = ut.get_daily_data_vars(ds_all, lat_i, lon_i, t)\n # disregard all NAN values\n if daily_data_vars is None:\n fwth.close()\n os.remove(wth_header)\n break\n\n if t == 0:\n ut.update_table(wth_header_u, lat, lon)\n\n entry = ut.format_data_vars_entry(daily_data_vars, date)\n\n # append this entry into the file\n fwth.write(entry)\n print(\"Added entry:\", entry)\n\n # close file after writing\n fwth.close()\n print(\"Output WTH:\", wth_header)", "def load_data(self, identifier):\n propertyType = self.soup.find('h2', class_='listing-details-h1').text\n propertyType = propertyType.replace(' for sale', '')\n beds = re.findall(r'\\d{1} bed', propertyType)\n if beds == []:\n # get first word\n beds = '1 bed'\n else:\n beds = beds[0]\n # get general info of property\n propertyType = propertyType.replace(str(beds) + ' ', '')\n address = self.soup.find('h2', {'itemprop':'streetAddress'}).text \n postCode = address.rsplit(None, 1)[-1]\n address = address.replace(postCode, '')\n latLong = self.soup.find('meta', {'itemprop':'latitude'}).attrs\n latitude = latLong['content']\n latLong = self.soup.find('meta', {'itemprop':'longitude'}).attrs\n longitude = latLong['content']\n # original price and changes to price are in sidebar\n for sidebar in self.soup.find_all('div', {\"class\":sCLASS_SECTION}):\n count = 0\n # price info in sidebar called 'Listing history' \n if sidebar.find(text=sSB_NAME):\n try:\n originalPriceAndDate = sidebar.find(text=sFIRST_LISTED).next_element.next_element\n originalPrice = originalPriceAndDate[:originalPriceAndDate.find(' on')]\n originalPrice = self.find_number(originalPrice)\n originalDate = originalPriceAndDate[originalPriceAndDate.find(' on') + 3:]\n originalDate = self.get_date(originalDate)\n # store original listing - in same order as headers!\n result = [identifier + \"_\" + str(count), originalDate, np.NaN, originalPrice, np.NaN, \n beds, propertyType, postCode, address, latitude, longitude, self.url]\n series = pd.Series(result, name=identifier + \"_\" + str(count), index=headers)\n# self.df = self.df.append(pd.Series(result, index=headers), ignore_index=True) \n# self.df = self.df.append(series) \n# self.df = pd.concat([self.df, series])\n self.df = self.df.append(series, ignore_index=False)\n except (AttributeError, UnboundLocalError):\n print('Error viewing this property')\n pass\n try:\n # store any changes to original listing\n for changes in sidebar.find_all('ul', {'class':sCHANGES}):\n for 
change in changes.find_all('li'):\n count += 1\n # get date and new price\n date = change.find('span').text\n date = date.replace('Reduced on:', '')\n date = date.replace('\\n', '')\n date = self.get_date(date)\n newPrice = self.find_number(change.next_element)\n # store result in order of headers\n result = [identifier + \"_\" + str(count), originalDate, date, originalPrice, newPrice, \n beds, propertyType, postCode, address, latitude, longitude, self.url]\n series = pd.Series(result, name=identifier + \"_\" + str(count), index=headers) \n# self.df = self.df.append(pd.Series(result, index=headers), ignore_index=True)\n self.df = self.df.append(series, ignore_index=False)\n# self.df = pd.concat([self.df, series])\n except (AttributeError, UnboundLocalError):\n print('No changes')\n pass", "def collect(self, start_date=None, end_date=None):\n if start_date is None:\n start_date = self.default_start\n if end_date is None:\n end_date = self.default_end\n\n cur = self.conn.cursor()\n\n # Maximum return is 1000 entries\n num_days = 1000 // len(self.stations)\n # Maximum date-range is 1 year\n if num_days > 365:\n num_days = 365\n\n for interval in netzero.util.time_intervals(\n start_date, end_date, days=num_days\n ):\n netzero.util.print_status(\n \"Weather\",\n \"Collecting: {} to {}\".format(\n interval[0].strftime(\"%Y-%m-%d\"), interval[1].strftime(\"%Y-%m-%d\")\n ),\n )\n\n # TODO -- REMOVE ASSUMPTION THAT LEN(DATA) < LIMIT\n raw_data = self.query_api(interval[0], interval[1])\n\n if raw_data is None:\n print(\"ERROR QUERYING API\") # TODO exception here?\n continue\n\n for entry in raw_data.get(\"results\", []):\n # Insert the weather data to the table, to be averaged later\n date = datetime.datetime.strptime(\n entry[\"date\"], \"%Y-%m-%dT%H:%M:%S\"\n ).date()\n value = entry[\"value\"]\n station = entry[\"station\"]\n\n cur.execute(\n \"INSERT OR IGNORE INTO weather VALUES (?, ?, ?)\", (date, value, station)\n )\n\n self.conn.commit()\n\n cur.close()\n\n netzero.util.print_status(\"Weather\", \"Complete\", newline=True)", "def _add_data(self, model_stations: Iterable[model.Station],\n validate_prefix: str = \"\") -> int:\n valid_station_count = 0\n jreast_merged_codes: dict[model.StationID, str] = load_csv_as_mapping(\n DIR_CURATED / \"jreast_merged_codes.csv\",\n itemgetter(\"sta_id\"),\n itemgetter(\"code\")\n )\n\n # Add data from model stations\n for model_sta in model_stations:\n is_invalid = False\n should_validate = model_sta.id.startswith(validate_prefix)\n\n # Find a matching geo_sta\n geo_sta = self.by_id.get(model_sta.id)\n if not geo_sta:\n if should_validate:\n self.logger.critical(f\"{Color.RED}geo.osm is missing station \"\n f\"{Color.MAGENTA}{model_sta.id}{Color.RESET}\")\n self.valid = False\n continue\n\n # Find a name\n name_id = last_part(geo_sta.id)\n geo_sta.name = self.names.get(name_id)\n if geo_sta.name is None and should_validate:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Copy stop_code\n geo_sta.code = model_sta.code\n\n # Check if station was valid\n if is_invalid:\n self.valid = False\n elif should_validate:\n valid_station_count += 1\n\n # Generate codes and names for mother stations\n for sta in self.by_id.values():\n if not sta.children:\n continue\n\n name_id = last_part(sta.id)\n sta.name = self.names.get(name_id)\n if not sta.name:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n 
f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Get children codes\n children_codes = []\n jreast_merged_code = jreast_merged_codes.get(sta.id)\n if jreast_merged_code:\n children_codes.append(jreast_merged_code)\n\n for child in sta.children:\n # Ignore JR-East child codes if there's a JR-East merged code\n if child.id.startswith(\"JR-East\") and jreast_merged_code:\n continue\n elif child.code:\n children_codes.append(child.code)\n\n sta.code = \"/\".join(children_codes)\n\n return valid_station_count", "def init_parameters(self, buildings, target_meters):\n if self.df is None:\n self.read_data_from_csv()\n\n if buildings is None:\n buildings = self.find_all_houses().tolist()\n\n if target_meters is None:\n target_meters = self.meter_name.keys()\n \n return buildings, target_meters", "def archive_mds_data(self, lmtdb):\n\n dataset_names = [\n 'mdservers/cpuload',\n ]\n\n self.init_datasets(dataset_names, lmtdb.mds_names)\n\n # Now query the MDS_DATA table to get byte counts over the query time range\n results, columns = lmtdb.get_mds_data(self.query_start, self.query_end_plusplus)\n\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'MDS_ID', 'PCT_CPU']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n target_name = lmtdb.mds_id_map[row[col_map['MDS_ID']]]\n for dataset_name in dataset_names:\n target_dbcol = self.config[dataset_name].get('column')\n # target_dbcol=PCT_CPU, target_name=snx11025n022\n if target_dbcol is not None:\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map[target_dbcol]])\n else:\n errmsg = \"%s in self.config but missing 'column' setting\" % dataset_name\n raise KeyError(errmsg)", "def update_static_data(source: list):\r\n connect = sqlite3.connect(\"REDB_v2.sqlite\")\r\n cursor = connect.cursor()\r\n districts = []\r\n streets = []\r\n series = []\r\n amenities = []\r\n building_types = []\r\n for realestate in source:\r\n if realestate.street not in streets:\r\n streets.append(realestate.street)\r\n if realestate.series not in series:\r\n series.append(realestate.series)\r\n if realestate.building not in building_types:\r\n building_types.append(realestate.building)\r\n if realestate.district not in districts:\r\n districts.append(realestate.district)\r\n if realestate.amenities not in amenities:\r\n amenities.append(realestate.amenities)\r\n for item in streets:\r\n try:\r\n cursor.execute(\"INSERT INTO Streets (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n for item in districts:\r\n try:\r\n cursor.execute(\"INSERT INTO Districts (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n for item in series:\r\n try:\r\n cursor.execute(\"INSERT INTO Series (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n for item in amenities:\r\n try:\r\n cursor.execute(\"INSERT INTO Amenities (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n for item in building_types:\r\n try:\r\n 
cursor.execute(\"INSERT INTO Buildings (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n connect.commit()\r\n connect.close()", "def set_up_file(ds, total_sites, footprint_nbins,\n dimy, dimx, wrf_out, time_unit, site_names):\n if site_names.dtype.str[1] not in \"SU\":\n site_names = site_names.astype(\"S\")\n if site_names.dtype.str[2:] != \"1\":\n site_names = netCDF4.stringtochar(site_names)\n name_length = site_names.shape[-1]\n name_str_dim = \"string{len:d}\".format(len=name_length)\n\n ds.createDimension(\"observation_time\", 0)\n ds.createDimension(\"site\", total_sites)\n ds.createDimension(\"time_before_observation\", footprint_nbins)\n ds.createDimension(\"dim_y\", dimy)\n ds.createDimension(\"dim_x\", dimx)\n ds.createDimension(\"bnds2\", 2)\n ds.createDimension(name_str_dim, name_length)\n\n obs_time_var = ds.createVariable(\"observation_time\", \"f4\",\n (\"observation_time\",))\n obs_time_bounds_var = ds.createVariable(\"observation_time_bnds\", \"f4\",\n (\"observation_time\", \"bnds2\"))\n time_back_var = ds.createVariable(\"time_before_observation\", \"i2\",\n (\"time_before_observation\",))\n time_back_bounds_var = ds.createVariable(\n \"time_before_observation_bnds\", \"i2\",\n (\"time_before_observation\", \"bnds2\"))\n ds.createVariable(\"dim_y\", \"f4\", (\"dim_y\",))\n ds.createVariable(\"dim_y_bnds\", \"f4\", (\"dim_y\", \"bnds2\"))\n ds.createVariable(\"dim_x\", \"f4\", (\"dim_x\",))\n ds.createVariable(\"dim_x_bnds\", \"f4\", (\"dim_x\", \"bnds2\"))\n\n grid_mapping = create_grid_mapping(ds, wrf_out)\n\n site_name_var = ds.createVariable(\"site_names\", \"S1\",\n (\"site\", name_str_dim))\n # these are roughly 1MB for a three-week lag\n flux_time_var = ds.createVariable(\n \"flux_time\", \"f4\", (\"observation_time\", \"time_before_observation\"),\n fill_value=-255,\n zlib=True)\n flux_time_bounds_var = ds.createVariable(\n \"flux_time_bnds\", \"f4\",\n (\"observation_time\", \"time_before_observation\", \"bnds2\"),\n fill_value=-255,\n zlib=True)\n\n height_var = ds.createVariable(\"height\", \"f4\", ())\n height_bounds_var = ds.createVariable(\"height_bnds\", \"f4\", (\"bnds2\",))\n\n infl_fun_var = ds.createVariable(\n # Empirically, the most particles seen in a grid cell is\n # around 5e3. 
35*180 particles/obs_time, 9 files/flux_time on 81km grid\n # We have a factor of six wiggle room with i2\n # u2 may be necessary for 3 hourly 243 km fluxes\n # or more particles/obs_time0\n \"H\", \"i2\",\n (\"observation_time\", \"site\", \"time_before_observation\",\n \"dim_y\", \"dim_x\"),\n zlib=True,\n # This will be written and read by flux time, usually,\n # so that chunksize should be 1\n # not sure if chunk should be total_sites or 1 for site dimension\n # total_size gives a chunk as around 5.3 MiB\n # setting this to 1 may help with file size\n # if some towers were not run all the time\n # NUG has default chunk size of 4 MiB\n # (roughly a disk read on a high-end system)\n chunksizes=(1, total_sites, 1, dimy, dimx),\n # This requires that every cell be written to.\n # This is my intent, and this (as opposed to fill_value=0)\n # will not have troubles with masking most of the domain.\n # Make sure this isn't what's inflating the size\n fill_value=-1,\n )\n\n lpdm_opts = ds.createVariable(\"lpdm_configuration\", \"i1\", ())\n lpdm_opts.setncatts({key: netcdf_compatible_array(config[key]).copy()\n for key in config.dtype.fields.keys()\n if key.islower()})\n\n wrf_opts = ds.createVariable(\"wrf_configuration\", \"i1\", ())\n with contextlib.closing(netCDF4.Dataset(\n config[\"wrf_file\"][0].decode(\"ascii\"))) as wrf_ds:\n wrf_opts.setncatts({att: wrf_ds.getncattr(att)\n for att in wrf_ds.ncattrs()})\n\n ########################################################\n\n obs_time_var.setncatts(dict(long_name=\"observation_time\",\n # not entirely sure this applies...\n standard_name=\"forecast_reference_time\",\n bounds=\"observation_time_bnds\",\n units=time_unit,\n calendar=CALENDAR,\n coverage_content_type=\"coordinate\",\n # might be a misapplication of CF 9.5\n cf_role=\"timeseries_id\"))\n obs_time_bounds_var.setncatts(dict(long_name=\"observation_time_bounds\",\n units=time_unit,\n calendar=CALENDAR))\n\n time_back_var.setncatts(dict(long_name=\"time_before_observation\",\n standard_name=\"forecast_period\",\n units=\"hours\",\n bounds=\"time_before_observation_bnds\",\n coverage_content_type=\"coordinate\",\n ))\n time_back_bounds_var.setncatts(dict(\n description=\"bounds of time_before_observation\",\n units=\"hours\"))\n\n flux_time_var.setncatts(dict(\n long_name=\"flux_time\",\n standard_name=\"time\",\n bounds=\"flux_time_bnds\",\n units=time_unit,\n calendar=CALENDAR,\n coverage_content_type=\"coordinate\",\n ))\n flux_time_bounds_var.setncatts(dict(\n long_name=\"flux_time\",\n units=time_unit,\n calendar=CALENDAR,\n ))\n\n infl_fun_var.setncatts(dict(\n long_name=\"influence_function\",\n description=(\"linearisation of the observation operator \"\n \"for carbon dioxide mixing ratios at the \"\n \"towers in terms of carbon dioxide mass fluxes\"),\n units=\"ppmv/(mol.m^-2.s^-1)\",\n long_units=\"ppmv/(mol_CO2.m^-2.s^-1)\",\n coordinates=(\"flux_time height latitude longitude \"\n \"site_names site_heights site_lats site_lons\"),\n # I don't think we can justify more than six or so digits\n # of precision. The transport is too uncertain.\n # The underlying int type doesn't support more than five.\n # The increased locality should also speed up use.\n scale_factor=np.array(CONVERSION_FACTOR, dtype=np.float32),\n grid_mapping=grid_mapping,\n valid_min=np.array(0, dtype=infl_fun_var.dtype),\n # description of coordinate relationships\n cell_methods=(\n # not entirely sure if space and obs time should be in\n # same sum. 
The two times are another possible\n # combination.\n \"height: dim_y: dim_x: sum \"\n \"observation_time: sum \"\n \"(interval: {lpdm_timestep:f} seconds) \"\n \"site: point \"\n # this sum is done later than the others\n \"flux_time: sum \"\n \"(interval: {minutes_per_file:d} minutes)\"\n \"\").format(minutes_per_file=(MINUTES_PER_HOUR //\n int(config[\"num_file_per_h\"])),\n lpdm_timestep=float(config[\"lpdm_timestep\"])),\n # What type of thing this is:\n coverage_content_type=\"modelResult\",\n ))\n # I want to store the counts directly\n infl_fun_var.set_auto_maskandscale(False)\n\n site_lats_var = ds.createVariable(\"site_lats\", \"f4\", (\"site\",))\n site_lons_var = ds.createVariable(\"site_lons\", \"f4\", (\"site\",))\n site_heights_var = ds.createVariable(\"site_heights\", \"f4\", (\"site\",))\n site_lats_var.setncatts(dict(\n units=\"degrees_north\", standard_name=\"latitude\",\n long_name=\"site_latitude\",\n coverage_content_type=\"coordinate\",\n description=\"latitude of the observation tower site\",\n origin=\"Set in LPD run script\"))\n site_lons_var.setncatts(dict(\n units=\"degrees_east\", standard_name=\"longitude\",\n long_name=\"site_longitude\",\n coverage_content_type=\"coordinate\",\n description=\"longitude of the observation tower site\",\n origin=\"Set in LPD run script\"))\n site_name_var.setncatts(dict(\n long_name=\"name_of_observation_site\",\n # most likely an abuse of CF section 9.5\n # cf_role=\"trajectory_id\"\n coverage_content_type=\"referenceInformation\",\n ))\n site_heights_var.setncatts(dict(\n standard_name=\"height\",\n long_name=\"site_heights\",\n description=\"height of the observation tower intake\",\n origin=\"Set in LPD run script\",\n coverage_content_type=\"coordinate\",\n positive=\"up\",\n units=\"m\"))\n\n height_var.setncatts(dict(\n standard_name=\"height\",\n long_name=\"flux_influence_height\",\n description=(\"How low the particles have to be \"\n \"to be \\\"influenced\\\" by the ground\"),\n origin=\"Constant CLOSE_TO_GROUND in carsurf_loop.py\",\n coverage_content_type=\"referenceInformation\",\n positive=\"up\",\n units=\"km\", bounds=\"height_bnds\"))\n height_bounds_var.setncatts(dict(\n long_name=\"height_bounds\",\n units=\"km\"))\n\n # pretty sure this fails somewhat badly at encapsulization\n set_coord_values(ds, wrf_out, footprint_nbins)\n site_name_var[:] = site_names\n\n return infl_fun_var", "def make_df_an_table(an_string, site_name='DSW', min_moon_dist=MIN_MOON_DISTANCE,\n min_hours=MIN_HOURS_OBSERVABLE):\n an_string = str(an_string) # (precaution in case int passed in)\n an_object = Astronight(an_string, site_name)\n # dark_start, dark_end = an_object.ts_dark.start, an_object.ts_dark.end\n mid_dark = an_object.local_middark_utc\n # dark_no_moon_start, dark_no_moon_end = an_object.ts_dark_no_moon.start, an_object.ts_dark_no_moon.end\n mpfile_dict = make_mpfile_dict()\n\n an_dict_list = [] # results to be deposited here, to make a dataframe later.\n for mp in mpfile_dict.keys():\n mpfile = mpfile_dict[mp]\n # an_dict doesn't need to include defaults for case before or after mpfile ephemeris,\n # because making the dataframe should put in NANs for missing keys anyway (check this later):\n an_dict = {'MPnumber': mpfile.number, 'MPname': mpfile.name, 'Motive': mpfile.motive,\n 'Priority': mpfile.priority, 'Period': mpfile.period}\n # Interpolate within ephemeris (because MP is moving in sky); 2 iterations s/be enough:\n data, status, ts_observable, mp_radec = None, None, None, None # keep stupid IDE happy.\n best_utc 
= mid_dark # best_utc will = mid-observable time at converged RA,Dec.\n\n # Converge on best RA, Dec, observable timespan (they interact, as MP is moving):\n hours_observable = 0.0 # default to keep IDE happy.\n for i in range(2):\n data = mpfile.eph_from_utc(best_utc)\n if data is None:\n if mpfile.eph_range[1] < an_object.ts_dark.start:\n status = 'too late'\n else:\n status = 'too early'\n break\n status = 'ok'\n mp_radec = RaDec(data['RA'], data['Dec'])\n ts_observable = an_object.ts_observable(mp_radec,\n min_alt=MIN_MP_ALTITUDE,\n min_moon_dist=min_moon_dist) # Timespan object\n hours_observable = ts_observable.seconds / 3600.0\n mid_observable = ts_observable.midpoint # for loop exit\n best_utc = mid_observable # update for loop continuation.\n\n # Mark valid MPs that are observable too briefly:\n if status.lower() == 'ok':\n if hours_observable < min_hours:\n status = 'too brief'\n\n # For MPs observable this night, add one line to table:\n # print(mpfile.name, status)\n an_dict['Status'] = status\n if status.lower() == 'ok':\n an_dict['RA'] = data['RA']\n an_dict['Dec'] = data['Dec']\n an_dict['StartUTC'] = ts_observable.start\n an_dict['EndUTC'] = ts_observable.end\n an_dict['TransitUTC'] = an_object.transit(mp_radec)\n an_dict['MoonDist'] = mp_radec.degrees_from(an_object.moon_radec)\n an_dict['PhaseAngle'] = data['Phase']\n an_dict['V_mag'] = data['V_mag']\n an_dict['ExpTime'] = float(round(float(calc_exp_time(an_dict['V_mag'],\n EXP_TIME_TABLE_PHOTOMETRY))))\n if an_dict['Period'] is not None:\n # Duty cycle is % of time spent observing this MP if one exposure per 1/60 of period.\n an_dict['DutyCyclePct'] = 100.0 * ((an_dict['ExpTime'] + EXP_OVERHEAD) / 60.0) / \\\n an_dict['Period']\n else:\n an_dict['DutyCyclePct'] = None\n if status.lower() == 'ok':\n an_dict['PhotrixPlanning'] = 'IMAGE MP_' + mpfile.number + \\\n ' Clear=' + str(an_dict['ExpTime']) + 'sec(***) ' + \\\n ra_as_hours(an_dict['RA'], seconds_decimal_places=1) + ' ' + \\\n degrees_as_hex(an_dict['Dec'], arcseconds_decimal_places=0)\n if an_dict['Period'] is not None:\n an_dict['Coverage'] = make_df_coverage(an_dict['Period'],\n mpfile.obs_jd_ranges,\n (jd_from_datetime_utc(an_dict['StartUTC']),\n jd_from_datetime_utc(an_dict['EndUTC'])))\n an_dict['PhaseCoverage'] = make_df_phase_coverage(an_dict['Period'],\n mpfile.obs_jd_ranges)\n else:\n an_dict['Coverage'] = None\n an_dict_list.append(an_dict)\n if len(an_dict_list) == 0:\n return None\n df_an_table = pd.DataFrame(data=an_dict_list)\n df_an_table.index = df_an_table['MPnumber'].values\n df_an_table = df_an_table.sort_values(by='TransitUTC')\n return df_an_table", "def update_stationlist(time_res='hourly',dbase_dir='dbase'):\r\n\r\n \r\n dwd_abbr = {'air_temperature': 'TU',\r\n 'cloud_type': 'CS', \r\n 'cloudiness': 'N',\r\n 'dew_point' : 'TD',\r\n 'extreme_temperature': 'TX',\r\n 'extreme_wind': 'FX',\r\n 'precipitation': 'RR',\r\n 'pressure': 'P0',\r\n 'soil_temperature': 'EB',\r\n 'solar': 'ST',\r\n 'sun': 'SD',\r\n 'visibility': 'VV',\r\n 'wind': 'FF',\r\n 'wind_synop': 'F'\r\n }\r\n \r\n # lets start\r\n print('Updating station list')\r\n \r\n # create output directory if not existing\r\n \r\n if not os.path.exists(dbase_dir):\r\n os.makedirs(dbase_dir)\r\n \r\n #check whether we have an up-to-date-station-list-already\r\n try:\r\n stations_network_old=[s for s in os.listdir(dbase_dir) if 'dwd_station_network' in s][0]\r\n datetime_network=datetime.date(datetime.strptime(re.findall('\\d+',stations_network_old)[0],'%Y%m%d'))\r\n #update if more than 
24hours\r\n dt_today=datetime.date(datetime.now())\r\n if (dt_today-datetime_network)<timedelta(days=1):\r\n print('DWD network list is up-to-date, no update needed')\r\n filename_stations=dbase_dir+'\\\\'+stations_network_old\r\n return filename_stations\r\n else:\r\n print('DWD network list neeeds to be updated')\r\n os.remove(dbase_dir+'\\\\'+stations_network_old)\r\n except:\r\n print('DWD network list neeeds to be updated')\r\n pass\r\n \r\n \r\n # header\r\n stations_network=pd.DataFrame()\r\n \r\n # connect to ftp server and go to the folder\r\n \r\n # Connect to the Server\r\n server='opendata.dwd.de'\r\n ftp=connect_ftp(server = server,connected = False)\r\n #change to subfolder\r\n ftp.cwd('/climate_environment/CDC/observations_germany/climate/' + time_res +'/')\r\n #get dwd categories\r\n dwd_categories=ftp.nlst()\r\n #loop through the subfolders to get the station lists\r\n for category in dwd_categories:\r\n print('retrieve stationlist for', category)\r\n #try to get historical data\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/historical/'\r\n ftp.cwd(dir_path)\r\n except Exception as e:\r\n print(e, 'try to download category', category, 'from other folder')\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/'\r\n ftp.cwd(dir_path)\r\n except:\r\n print('Category', category, 'could not have been downloaded')\r\n pass\r\n #retrieve the stationlist\r\n stationlist = []\r\n # try to retrieve file\r\n retrieved=False\r\n filename=dwd_abbr[category]+'_Stundenwerte_Beschreibung_Stationen.txt'\r\n while not retrieved:\r\n try:\r\n ftp.retrlines(\"RETR \" + filename, stationlist.append)\r\n #ftp.retrbinary(\"RETR \" + filestr, stationlist.write)\r\n retrieved = True\r\n except:\r\n ftp=connect_ftp(server = server,connected = False)\r\n ftp.cwd(dir_path)\r\n #remove first two lines\r\n stationlist=stationlist[2:]\r\n #delete uncessary blanks\r\n stationlist=[re.sub(' +', ' ', station.rstrip()) for station in stationlist]\r\n #split the list\r\n stationlist=[station.split(\" \")[:7] for station in stationlist]\r\n #read as dataframe\r\n dfstations=pd.DataFrame(stationlist,columns=['STATIONS_ID','date_start','date_end','height','geo_lat','geo_lon','name'])\r\n #add true information to category\r\n dfstations[category]=True\r\n \r\n stations_network=stations_network.append(dfstations,sort=False,ignore_index=True)\r\n #A=[sub.split(\" \") for sub in stationlist] \r\n \r\n #replace all Na by False\r\n stations_network[stations_network.isna()]=0 \r\n #aggregate\r\n stations_network=stations_network.groupby(['STATIONS_ID'],as_index=False).agg('max')\r\n #replace zero by False in order to have pure boolean data\r\n stations_network.replace(0,False,inplace=True)\r\n #fix the error with station 14138 and 05614 and 07325, which does not have pressure cord\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='05614','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='07325','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='01572','pressure']=False\r\n #for temperature the same\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','air_temperature']=False\r\n #save to database writing the time as well\r\n filename_stations=dbase_dir+'\\\\dwd_station_network_'+datetime.now().strftime('%Y%m%d')+'.csv'\r\n 
stations_network.to_csv(filename_stations,index=False)\r\n \r\n print('Updating station list...finished')\r\n \r\n return filename_stations", "def synthesize_employment_data(self, config):\r\n jobs_by_zone_by_sector_table_name = config['jobs_by_zone_by_sector']\r\n gridcells_table_name = config['gridcells']\r\n jobs_table_name = config['jobs']\r\n gridcells_output_table_name = config['gridcells_output']\r\n jobs_output_table_name = config['jobs_output']\r\n \r\n input_db_name = config['db_config'].database_name\r\n output_db_name = config['output_database_name']\r\n \r\n sectors = config['sector_names_and_ids']\r\n building_types_and_ids_and_home_based = config[\r\n 'building_type_column_names_and_ids_and_home_based']\r\n \r\n building_types = []\r\n building_ids = []\r\n home_based = [] \r\n for type, id, home in building_types_and_ids_and_home_based:\r\n building_types += [type]\r\n building_ids += [id]\r\n home_based += [home]\r\n \r\n \r\n from_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = input_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = config['db_config'].password \r\n )\r\n to_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = output_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = config['db_config'].password \r\n )\r\n\r\n FlattenScenarioDatabaseChain().copy_scenario_database(\r\n from_database_configuration = from_database_configuration, \r\n to_database_configuration = to_database_configuration,\r\n tables_to_copy = [gridcells_table_name, jobs_table_name])\r\n \r\n db_server = DatabaseServer(to_database_configuration) \r\n output_database = db_server.get_database(output_db_name)\r\n \r\n sector_name = 0; sector_id = 1\r\n \r\n sector = {}\r\n for entry in sectors:\r\n name = entry[sector_name]\r\n id = entry[sector_id]\r\n sector[id] = self._get_jobs_per_building_type_in_sector_by_zone(\r\n output_database, jobs_by_zone_by_sector_table_name, \r\n jobs_table_name, name, id)\r\n\r\n results = self._get_building_type_proportion_by_zone(output_database, \r\n gridcells_table_name)\r\n \r\n grid_id = 0; zone_id = 1\r\n dist = {}\r\n \r\n type_index = {}\r\n \r\n for name in building_types:\r\n for i in range(len(results[0])):\r\n column_name = results[0][i]\r\n if name == column_name:\r\n type_index[name] = i\r\n break;\r\n else:\r\n raise KeyError, ('No column by the name of \\'%s\\' found in '\r\n 'the database.' 
% name) \r\n\r\n for name in building_types:\r\n dist[name] = {}\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] = []\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] += [(row[grid_id], \r\n row[type_index[name]])]\r\n \r\n jobs_table_data = self._create_jobs_table_data(dist, sector,\r\n building_types_and_ids_and_home_based)\r\n \r\n output_database.execute('USE %(out_db)s' % {'out_db':output_db_name})\r\n \r\n output_database.execute(\"\"\"\r\n CREATE TABLE %(jobs_out)s (\r\n JOB_ID INT AUTO_INCREMENT, PRIMARY KEY(JOB_ID),\r\n GRID_ID INT, HOME_BASED INT, SECTOR_ID INT, BUILDING_TYPE INT);\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n \r\n if len(jobs_table_data) > 0:\r\n output_prefix = (\r\n \"\"\"INSERT INTO %(jobs_out)s \r\n (GRID_ID, HOME_BASED, SECTOR_ID, BUILDING_TYPE) VALUES\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n output_postfix = ';'\r\n \r\n step = 1000\r\n length = len(jobs_table_data)\r\n iterations = int(length/step) + 1\r\n \r\n for i in range(iterations):\r\n low = i*step\r\n high = (i+1)*step\r\n \r\n if high > length: high = length\r\n \r\n output_body = \"\"\r\n \r\n for j in range(low, high):\r\n output_body += (\r\n '(%(grid)s, %(home)s, %(sector)s, %(building)s),\\n' \r\n % jobs_table_data[j])\r\n \r\n output_query = \"%s%s%s\" % (output_prefix, \r\n output_body[:-2], \r\n output_postfix)\r\n\r\n output_database.execute(output_query)\r\n \r\n \r\n ### TODO: \r", "def insert_humans_staging(self):\n for year in range(1880, CURRENT_YEAR):\n self.load_wikidata(\"humans\", HUMANS_BY_YEAR_SPARQL_QUERY, INSERT_HUMAN_SQL_QUERY,\n INSERT_HUMAN_MAP_COLUMNS, year=year)", "def disaggregate(self, mains, output_datastore):\n \n building_path = '/building{}'.format(mains.building())\n # only writes one appliance and meter per building\n meter_instance = 2\n mains_data_location = '{}/elec/meter1'.format(building_path)\n \n #dis_main = pd.DataFrame()\n chunk_number = 0\n timeframes = []\n\n for chunk in mains.power_series():\n \n # Record metadata\n timeframes.append(chunk.timeframe)\n measurement = chunk.name\n cols = pd.MultiIndex.from_tuples([chunk.name])\n \n dis_chunk = self.disaggregate_chunk(\n pd.DataFrame(chunk.resample(self.sample_period, how=self.sampling_method)))\n #dis_main = pd.concat([dis_main, dis_chunk])\n chunk_number += 1\n print(str(chunk_number) + \" chunks disaggregated\")\n \n # Write appliance data to disag output\n key = '{}/elec/meter{}'.format(building_path, meter_instance)\n df = pd.DataFrame(\n dis_chunk.values, index=dis_chunk.index,\n columns=cols)\n output_datastore.append(key, df)\n\n # Copy mains data to disag output\n output_datastore.append(key=mains_data_location,\n value=pd.DataFrame(chunk, columns=cols))\n\n # Saving output datastore:\n #output_datastore.append(key=mains.key, value=dis_main)\n \n ##################################\n # Add metadata to output_datastore\n\n # TODO: `preprocessing_applied` for all meters\n # TODO: split this metadata code into a separate function\n # TODO: submeter measurement should probably be the mains\n # measurement we used to train on, not the mains measurement.\n \n date_now = datetime.now().isoformat().split('.')[0]\n output_name = 'NILMTK_MLE_' + date_now\n resample_seconds = 10\n mains_data_location = '{}/elec/meter1'.format(building_path)\n\n # DataSet and MeterDevice metadata:\n meter_devices = {\n 'MLE': {\n 'model': 'MLE',\n 'sample_period': resample_seconds,\n 'max_sample_period': 
resample_seconds,\n 'measurements': [{\n 'physical_quantity': measurement[0],\n 'type': measurement[1]\n }]\n },\n 'mains': {\n 'model': 'mains',\n 'sample_period': resample_seconds,\n 'max_sample_period': resample_seconds,\n 'measurements': [{\n 'physical_quantity': measurement[0],\n 'type': measurement[1]\n }]\n }\n }\n\n merged_timeframes = merge_timeframes(timeframes, gap=resample_seconds)\n total_timeframe = TimeFrame(merged_timeframes[0].start,\n merged_timeframes[-1].end)\n\n dataset_metadata = {'name': output_name, 'date': date_now,\n 'meter_devices': meter_devices,\n 'timeframe': total_timeframe.to_dict()}\n output_datastore.save_metadata('/', dataset_metadata)\n\n # Building metadata\n\n # Mains meter:\n elec_meters = {\n 1: {\n 'device_model': 'mains',\n 'site_meter': True,\n 'data_location': mains_data_location,\n 'preprocessing_applied': {}, # TODO\n 'statistics': {\n 'timeframe': total_timeframe.to_dict()\n }\n }\n }\n\n # Appliances and submeters:\n appliances = []\n appliance = {\n 'meters': [meter_instance],\n 'type': 'kettle',\n 'instance': 1\n # TODO this `instance` will only be correct when the\n # model is trained on the same house as it is tested on.\n # https://github.com/nilmtk/nilmtk/issues/194\n }\n appliances.append(appliance)\n\n elec_meters.update({\n meter_instance: {\n 'device_model': 'MLE',\n 'submeter_of': 1,\n 'data_location': ('{}/elec/meter{}'\n .format(building_path, meter_instance)),\n 'preprocessing_applied': {}, # TODO\n 'statistics': {\n 'timeframe': total_timeframe.to_dict()\n }\n }\n })\n elec_meters[meter_instance]['name'] = 'kettle'\n\n building_metadata = {\n 'instance': mains.building(),\n 'elec_meters': elec_meters,\n 'appliances': appliances\n }\n\n output_datastore.save_metadata(building_path, building_metadata)", "def _populate_zone_facts_table(self):\n census_fields = [\n 'poverty_rate', 'fraction_black', 'income_per_capita',\n 'labor_participation', 'fraction_foreign',\n 'fraction_single_mothers', 'acs_lower_rent_quartile',\n 'acs_median_rent', 'acs_upper_rent_quartile'\n ]\n\n zone_types = ['ward', 'neighborhood_cluster', 'census_tract']\n\n query_results = list()\n\n # populate columns accordingly for each zone_specific type\n for zone_type in zone_types:\n field_values = dict()\n\n # get field value for each zone_specific type\n for field in census_fields:\n result = self._census_with_weighting(data_id=field,\n grouping=zone_type)\n field_values[field] = result['items']\n\n zone_specifics = self._get_zone_specifics_for_zone_type(zone_type)\n\n # TODO: add aggregate for each zone_type into table\n for zone in zone_specifics:\n # get not None values so we can added to db\n columns = list()\n values = list()\n for field in census_fields:\n zone_value = field_values[field][zone]\n\n if zone_value is not None:\n columns.append(field)\n values.append(\"'\" + str(zone_value) + \"'\")\n\n # derive column and values strings needed for sql query\n columns = ', '.join(columns)\n columns = 'zone, ' + columns\n\n values = ', '.join(values)\n values = \"'\" + zone + \"', \" + values\n\n q = \"INSERT INTO zone_facts ({cols}) VALUES ({vals})\".format(\n cols=columns, vals=values)\n\n with self.engine.connect() as conn:\n result = conn.execute(q)\n query_results.append(result)\n\n return query_results", "def office_station_statistics(parser, args, params): \n parser.add_argument(\n '--station_list', type=str, help='Station file from IRIS',\n required=True, metavar='x')\n \n local_args = parser.parse_known_args(args)\n \n station_list = 
local_args[0].station_list\n control.station_statistics(params, station_list)", "def create_extract():\n with open(SCHEMA_FILE, \"r\") as f:\n SCHEMA = yaml.safe_load(f)\n\n with open(TOKEN_FILE, \"r\") as f:\n TOKEN = yaml.safe_load(f)\n\n hc = HyperCreator(SCHEMA, HYPER_FILE)\n ts = Tableau(TOKEN[\"server\"], TOKEN[\"site\"], TOKEN[\"name\"], TOKEN[\"value\"])\n\n for table in SCHEMA[\"tables\"]:\n with open(f\"{CONTENT_MANAGEMENT}/{table['query']}\", \"r\") as f:\n query = f.read()\n\n data = ts.query_metadata(query)\n data_map = getattr(GraphQL, table[\"name\"])(data)\n\n hc.populate_extract(table[\"name\"], data_map)", "def __init__(self, locator, weather_data, building_names=None):\n\n if building_names is None:\n building_names = locator.get_zone_building_names()\n\n self.building_names = building_names\n print(\"read input files\")\n prop_geometry = Gdf.from_file(locator.get_zone_geometry())\n prop_geometry['footprint'] = prop_geometry.area\n prop_geometry['perimeter'] = prop_geometry.length\n prop_geometry['Blength'], prop_geometry['Bwidth'] = self.calc_bounding_box_geom(locator.get_zone_geometry())\n prop_geometry = prop_geometry.drop('geometry', axis=1).set_index('Name')\n prop_hvac = dbf_to_dataframe(locator.get_building_air_conditioning())\n\n prop_typology = dbf_to_dataframe(locator.get_building_typology()).set_index('Name')\n # Drop 'REFERENCE' column if it exists\n if 'REFERENCE' in prop_typology:\n prop_typology.drop('REFERENCE', axis=1, inplace=True)\n prop_architectures = dbf_to_dataframe(locator.get_building_architecture())\n prop_comfort = dbf_to_dataframe(locator.get_building_comfort()).set_index('Name')\n prop_internal_loads = dbf_to_dataframe(locator.get_building_internal()).set_index('Name')\n prop_supply_systems_building = dbf_to_dataframe(locator.get_building_supply())\n\n # GET SYSTEMS EFFICIENCIES\n prop_supply_systems = get_properties_supply_sytems(locator, prop_supply_systems_building).set_index('Name')\n\n # get temperatures of operation\n prop_HVAC_result = get_properties_technical_systems(locator, prop_hvac).set_index('Name')\n\n # get envelope properties\n prop_envelope = get_envelope_properties(locator, prop_architectures).set_index('Name')\n\n # get properties of rc demand model\n prop_rc_model = self.calc_prop_rc_model(locator, prop_typology, prop_envelope,\n prop_geometry, prop_HVAC_result)\n\n # get solar properties\n solar = get_prop_solar(locator, building_names, prop_rc_model, prop_envelope, weather_data).set_index('Name')\n\n # df_windows = geometry_reader.create_windows(surface_properties, prop_envelope)\n # TODO: to check if the Win_op and height of window is necessary.\n # TODO: maybe mergin branch i9 with CItyGML could help with this\n print(\"done\")\n\n # save resulting data\n self._prop_supply_systems = prop_supply_systems\n self._prop_geometry = prop_geometry\n self._prop_envelope = prop_envelope\n self._prop_typology = prop_typology\n self._prop_HVAC_result = prop_HVAC_result\n self._prop_comfort = prop_comfort\n self._prop_internal_loads = prop_internal_loads\n self._prop_age = prop_typology[['YEAR']]\n self._solar = solar\n self._prop_RC_model = prop_rc_model", "def createStations (config):\n trace (\"createStations()\")\n for section in config.sections():\n if section.capitalize().startswith(\"Station\"):\n myPressureProbes = myHumidityProbes = []\n myTemperatureProbes= []\n name = section\n for option in config.options (section):\n value = config.get (section, option)\n opt = option.capitalize()\n if opt == \"Name\":\n name = value\n 
elif opt == \"Temperature\":\n myTemperatureProbes = getProbeList (value,\n temperatureProbes)\n elif opt == \"Pressure\":\n myPressureProbes = getProbeList (value,\n pressureProbes)\n elif opt == \"Humidity\":\n myHumidityProbes = getProbeList (value,\n humidityProbes)\n stations [name] = Station.Station(myTemperatureProbes,\n myPressureProbes, myHumidityProbes, name)", "def read_data_files(self):\n\n for name, snap in zip(self.names, self.snaps):\n # build the very important dictionary:\n key = f'{name}_{snap:03}' # e.g 'MW_000'\n self.galaxies[key] = Galaxy(name, snap, self.path, \n self.usesql, self.ptype, self.stride)\n self.time = self.galaxies[key].time\n\n # bits of minor housekeeping:\n # self.path = self.galaxies[key].filepath # may speed up next search\n self.filenames.append(key)", "def _obtain_data(self):\n (self.data_df, self.column_df, self.station_name, self.log_file, self.station_lat, self.station_lon,\n self.station_elev, self.ws_anemometer_height, self.missing_fill_value, self.script_mode,\n self.auto_mode, self.fill_mode, self.metadata_mode, self.generate_bokeh, self.metadata_df,\n metadata_series) = input_functions.obtain_data(self.config_path, self.metadata_path)\n\n if self.script_mode == 1: # correcting data\n self.mc_iterations = 1000 # Number of iters for MC simulation of thornton running solar radiation gen\n else:\n self.mc_iterations = 50 # if we're not correcting data then only do a few iterations to save time\n\n print(\"\\nSystem: Raw data successfully extracted from station file.\")\n\n # Extract individual variables from data frame back into to numpy arrays.\n self.data_year = np.array(self.data_df.year)\n self.data_month = np.array(self.data_df.month)\n self.data_day = np.array(self.data_df.day)\n self.data_tavg = np.array(self.data_df.tavg)\n self.data_tmax = np.array(self.data_df.tmax)\n self.data_tmin = np.array(self.data_df.tmin)\n self.data_tdew = np.array(self.data_df.tdew)\n self.data_ea = np.array(self.data_df.ea)\n self.data_rhavg = np.array(self.data_df.rhavg)\n self.data_rhmax = np.array(self.data_df.rhmax)\n self.data_rhmin = np.array(self.data_df.rhmin)\n self.data_rs = np.array(self.data_df.rs)\n self.data_ws = np.array(self.data_df.ws)\n self.data_precip = np.array(self.data_df.precip)\n\n self.output_file_path = \"correction_files/\" + self.station_name + \"_output\" + \".xlsx\"", "def populate_locations(connection):\n print('Populating locations...')\n cursor = connection.cursor()\n with open(get_data_path('locations', 'locations.json'), 'r', encoding='utf-8') as json_file:\n locations = json.load(json_file)\n\n for station_id, location in locations.items():\n cursor.execute(f'''SELECT id \n FROM watercourse_stations \n WHERE id = {station_id}''')\n\n if len(cursor.fetchall()):\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n cursor.execute(f'''UPDATE watercourse_stations\n SET location_id = {cursor.lastrowid}\n WHERE id = {station_id}''')", "def run():\n # build the station list and update the current levels\n station_list = build_station_list()\n update_water_levels(station_list, use_cache=True)\n\n num_stations = 10\n highest_level_stations = stations_highest_rel_level(station_list, num_stations)\n\n print(\"{} stations with the highest relative water levels, in descending order:\".format(num_stations))\n for station in highest_level_stations:\n print(station.name, station.relative_water_level())", "def populate_agdds(start_date, end_date, source, 
source_id, stations):\r\n # possibly grab ACIS station data (for entire date range)\r\n if source == 'ACIS':\r\n station_ids = []\r\n for station in stations:\r\n station_ids.append(station['char_network_id'])\r\n acis_data = get_acis_climate_data(\",\".join(station_ids), 'mint,maxt,gdd32,gdd50', start_date, end_date)\r\n\r\n for station in stations:\r\n print(station['char_network_id'])\r\n # grab previous days tmin, tmax, and agdd for both bases from mysql agdds table and start over at year breaks\r\n day_before_start_date = start_date - timedelta(days=1)\r\n if day_before_start_date.year == start_date.year:\r\n prev_tmin = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmin')\r\n prev_tmax = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmax')\r\n agdd32 = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'agdd')\r\n agdd50 = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 50, 'agdd')\r\n else:\r\n prev_tmin = None\r\n prev_tmax = None\r\n agdd32 = None\r\n agdd50 = None\r\n\r\n if prev_tmin is None or prev_tmin == 'M':\r\n prev_tmin = 0\r\n if prev_tmax is None or prev_tmax == 'M':\r\n prev_tmax = 0\r\n if agdd32 is None or agdd32 == 'M':\r\n agdd32 = 0\r\n if agdd50 is None or agdd50 == 'M':\r\n agdd50 = 0\r\n\r\n # possibly find station of interest from ACIS retrieved data\r\n acis_station = None\r\n if source == 'ACIS':\r\n station_found = False\r\n for a_station in acis_data['data']:\r\n if station_found:\r\n break\r\n for sid in a_station['meta']['sids']:\r\n # print(sid)\r\n # print(station['char_network_id'])\r\n if station['char_network_id'] in sid:\r\n station_found = True\r\n acis_station = a_station\r\n break\r\n if not station_found:\r\n print(\"Could not find station \" + station['char_network_id'])\r\n\r\n previous_year = start_date.year\r\n delta = end_date - start_date\r\n for i in range(delta.days + 1):\r\n day = start_date + timedelta(days=i)\r\n doy = day.timetuple().tm_yday\r\n\r\n # reset the agdd to 0 if we go into a new year\r\n if previous_year != day.year:\r\n agdd32 = 0\r\n agdd50 = 0\r\n previous_year = day.year\r\n\r\n missing_data = False\r\n print(day.strftime(\"%Y-%m-%d\"))\r\n\r\n # see if we already have tmin and tmax from local db\r\n # tmin = None\r\n # tmax = None\r\n tmin = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmin')\r\n tmax = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmax')\r\n\r\n already_retrieved = False\r\n if tmin is not None and tmin != 'M' and tmax is not None and tmax != 'M' and source != 'PRISM':\r\n already_retrieved = True\r\n\r\n # don't already have tmin and tmax locally so grab from URMA postgis db or ACIS data\r\n if not already_retrieved:\r\n if source == 'URMA':\r\n if station['char_value'] == 'AK':\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'alaska')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'alaska')\r\n else:\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'conus')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'conus')\r\n # URMA and PRISM are in celsius in our postgis db everything else is Fer so convert here\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n tmax = tmax * 1.8 + 32\r\n elif source == 
'PRISM':\r\n tmin = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmin')\r\n tmax = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmax')\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n tmax = tmax * 1.8 + 32\r\n elif acis_station is not None:\r\n tmin = acis_station['data'][i][0]\r\n tmax = acis_station['data'][i][1]\r\n\r\n # if tmin or tmax is missing, set to previous day's and mark as missing\r\n if tmin is not None and tmin != 'M':\r\n tmin = float(tmin)\r\n prev_tmin = tmin\r\n else:\r\n missing_data = True\r\n tmin = prev_tmin\r\n if tmax is not None and tmax != 'M':\r\n tmax = float(tmax)\r\n prev_tmax = tmax\r\n else:\r\n missing_data = True\r\n tmax = prev_tmax\r\n\r\n # compute gdd and agdd for both bases\r\n gdd32 = compute_gdd(tmin, tmax, 32)\r\n gdd50 = compute_gdd(tmin, tmax, 50)\r\n\r\n agdd32 += gdd32\r\n agdd50 += gdd50\r\n\r\n if not already_retrieved:\r\n # do an insert or update\r\n add_agdd_row(station['station_id'], source_id, gdd32, agdd32, day.year, doy, day, 32, missing_data, tmin, tmax)\r\n add_agdd_row(station['station_id'], source_id, gdd50, agdd50, day.year, doy, day, 50, missing_data, tmin, tmax)", "def datamaker(band, skypos, outfile, maglimit=20., margin=0.005,\n searchradius=0.1, radius=gt.aper2deg(4), annulus=[0.0083, 0.025],\n verbose=0):\n\n extant_objids = file_setup(outfile)\n\n if extant_objids == False:\n print('NOT RUNNING!!*!')\n return False\n\n uniques = dt.find_unique_sources(band, skypos[0], skypos[1], searchradius,\n maglimit=maglimit)\n\n if uniques is None:\n print('No sources at this position.')\n return\n\n for pos in uniques:\n mcat = dt.get_mcat_data(pos, margin)\n if not mcat:\n print('Nothing at {pos}.'.format(pos=pos))\n continue\n extant_objids = file_setup(outfile)\n for i, objid in enumerate(mcat['objid']):\n if mcat[band]['ra'][i] == -99. 
and mcat[band]['dec'][i] == -99.:\n print('No {b} source'.format(b=band))\n continue\n if objid in extant_objids:\n print('Already processed.')\n continue\n #exp = dt.exp_from_objid(objid)\n if mcat[band]['t0'][i] < 0:\n print('No MCAT exposure: skipping')\n continue\n print([mcat[band]['ra'][i], mcat[band]['dec'][i]])\n print([mcat[band]['t0'][i], mcat[band]['t1'][i]])\n data = gAperture(band, [mcat[band]['ra'][i], mcat[band]['dec'][i]],\n radius, annulus=annulus, verbose=verbose,\n coadd=True, trange=[mcat[band]['t0'][i],\n mcat[band]['t1'][i]],\n detsize=1.25)\n try:\n csv_construct = construct_row(i, band, objid, mcat, data)\n print(csv_construct)\n with open(outfile, 'ab') as csvfile:\n spreadsheet = csv.writer(csvfile, delimiter=',',\n quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n spreadsheet.writerow(csv_construct)\n except TypeError:\n continue\n\n return", "def load_meters_from_buliding(self, target_building, meters_name=[], sample_rate = '1min'):\n if self.df is None:\n self.read_data_from_csv()\n \n if len(meters_name) < 1 :\n meters_name = self.meter_name.keys()\n\n if 'main' in meters_name:\n meters_name.remove('main')\n \n building_meters = self.df.groupby('buildingid').get_group(target_building)\n building_meters.index = pd.to_datetime(building_meters['reporttime'], format='%Y-%m-%d %H:%M:%S')\n building_meters = building_meters.groupby('channelid')\n building_channels = building_meters.groups.keys()\n \n if self.meter_name['main'][0] not in building_channels: return\n buliding_df = building_meters.get_group(self.meter_name['main'][0]).rename(columns={\"w\": \"main\"})\n buliding_df = buliding_df.resample(sample_rate, how='mean')\n target_meters = ['main']\n\n for meter, channel_ids in self.meter_name.iteritems():\n if meter in meters_name and channel_ids[0] in building_channels:\n appliance_meter = building_meters.get_group(channel_ids[0]).rename(columns={\"w\": meter})\n \n for channel_id in channel_ids[1:]:\n if channel_id not in building_channels: continue\n another_channel = building_meters.get_group(channel_id).rename(columns={\"w\": meter})\n appliance_meter.append(another_channel)\n\n appliance_meter = appliance_meter.resample(sample_rate, how='mean')\n buliding_df = pd.merge(buliding_df, appliance_meter, right_index=True, left_index=True, how='left')\n target_meters.append(meter)\n \n buliding_df = buliding_df[target_meters]\n buliding_df = buliding_df[~buliding_df.index.duplicated()]\n if buliding_df is not None:\n self.buliding_df.setdefault(target_building, buliding_df)\n \n return buliding_df", "def main(self):\n\n assault_mech_df = self.get_mech_df(url=self.assault_url)\n heavy_mech_df = self.get_mech_df(url=self.heavy_url)\n medium_mech_df = self.get_mech_df(url=self.medium_url)\n light_mech_df = self.get_mech_df(url=self.light_url)\n all_weights_df = pd.concat([assault_mech_df, heavy_mech_df, medium_mech_df, \n light_mech_df])\n\n self.save_data(assault_mech_df, \"assault\")\n self.save_data(heavy_mech_df, \"heavy\")\n self.save_data(medium_mech_df, \"medium\")\n self.save_data(light_mech_df, \"light\")\n self.save_data(all_weights_df, \"all_weights\")\n #get maximum new columns needed for splitting variants\n max_cols = all_weights_df.variants.apply(lambda x: len(x)).max()\n melt_cols = []\n\n for i in range(max_cols):\n all_weights_df[\"var_\"+str(i)] = \"\"\n melt_cols.append(\"var_\"+str(i))\n\n variant_weights_df = pd.DataFrame()\n for index, row in all_weights_df.iterrows():\n for i in range(len(row[\"variants\"])):\n #add each variant to variant 
weights as a row with mech, tonnage, variant\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n for i in range(len(row[\"hero_chassis\"])):\n new_row_dict = {\n \"mech_name\":row[\"hero_names\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"hero_chassis\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n\n for i in range(len(row[\"special_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"special_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df]) \n\n #add champion variants by matching on \n for i in range(len(row[\"champion_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"champion_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n #remove duplicate rows \n variant_weights_df = variant_weights_df[variant_weights_df.duplicated(keep=\"first\")==False]\n self.save_data(variant_weights_df, \"variant_weights\")", "def dataframe():\n\t#allows function to access station, gmt, and miss_station functions\n global stations\n\tglobal gmt\n\tglobal miss_station\n\t\n\t#read predictor file\n\tcontrol = cfg.read_yaml('../registry/graphs.yaml')\n\tpred_ctrl = cfg.read_yaml(cfg.get_config_path(control.pred_file))\n\tpredd_ctrl = cfg.read_yaml(cfg.get_config_path(control.predd_file))\n\n\t#get file paths and update database\n\tpredictor_file_path = control.predictor_file_path\n\tpredictand_file_path = control.predictand_file_path\n\tpred_file_id = update(predictor_file_path)\n\tpredd_file_id = update(predictand_file_path)\n\t\n\t#store lead time and date range\n\tlead_time = control.lead_time\n\tdate_range = control.date_range\n\n\t#get info for fetch many dates\n\tstart,end,stride = read_pred.parse_range(date_range)\n\tfcst_ref_time = control.date_range[0].split('-')[0][-2:]\n\t\n\t#initialize list of predictors\n\tpred_list = pred_ctrl.predictors\n\tpredictor = []\n\n\t#loops through predictors to build camps data objects\n\tfor entry_dict in pred_list:\n\t\t#formats metadata\n\t\tpred = create.preprocess_entries(entry_dict, fcst_ref_time)\n\t\t\n\t\t#adds info to metadata that's not currently being stored\n\t\tpred.search_metadata['reserved2'] = lead_time*3600\n pred.search_metadata['file_id'] = pred_file_id\n\t\tpred.search_metadata['reserved1'] = 'vector'\n\n\t\t#build camps data objects for each day\n\t\tvariable = fetch_many_dates(predictor_file_path,start,end,stride,pred.search_metadata)\n\t\t\n\t\t#appends all data to single camps object\n\t\tif variable[0] is not None:\n\t\t\tvar = variable[0]\n\t\t\tarrs = []\n\t\t\tfor i in range(len(variable)):\n\t\t\t\tarrs.append(variable[i].data)\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictor.append(var)\n\n\t#initializes list of predictands\n\tpredd_list = predd_ctrl.predictands\n predictand = []\n\t\n\t#loops through predictands to build camps data objects\n for entry_dict in predd_list:\n\t\t#formats metadata\n \tvertical_coordinate = entry_dict.pop('Vertical_Coordinate')\n\t\tentry_dict['file_id'] = predd_file_id\n\n\t\t#build camps 
objects for each day\n variable = fetch_many_dates(predictand_file_path,start, end, stride, entry_dict)\n\n\t\t#append all data to single camps object\n var = variable[0]\n arrs = []\n for i in range(len(variable)):\n arrs.append(variable[i].data)\n try:\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictand.append(var)\n\t\texcept:\n\t\t\tprint(\"Can't read \" + variable.name)\n\n\t#getting predictor station and time data\n\tpredr = Dataset(predictor_file_path[0])\n\tpredr_stat = predr.variables['station'][:]\n\tif lead_time == 3:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant'][:]\n\telif lead_time == 6:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant1'][:]\n\telif lead_time == 12:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant2'][:]\n\tpredr.close()\n\n\t#reformatting predictor station and time data\n\tpredr_stations = stations(predr_stat)\n\tpredr_gmt = gmt(predr_time)\n\t\n\t#getting predictand station and time data\n\tpredd = Dataset(predictand_file_path[0])\n\tpredd_stat = predd.variables['station'][:]\n\tpredd_time = predd.variables['OM__resultTime'][:]\n\tpredd.close()\n\t\n\t#reformatting predictand station and time data\n\tpredd_stations = stations(predd_stat)\n\tpredd_gmt = gmt(predd_time)\n\n\t#choosing predictand observations that line up with predictor time\n\thour = (predictor[0].metadata['FcstTime_hour']/3600) + lead_time\n\tdays = len(predd_gmt)/24\n\tpredd_hours = [0]*days\n k=0\n for i in range(len(predd_gmt)):\n if i%24 == hour:\n\t\t\tpredd_hours[k]=predd_gmt[i]\n\t\t\tk+=1\n\t\n\t#catches when GFS data doesn't cover the last day of the month\n\tif len(predr_gmt) < len(predd_hours):\n\t\tpredd_hours = predd_hours[:-1]\t\n\t\n\t#find missing stations\n\tmiss_stations = miss_station(predr_stations,predd_stations)\n\tstations = predd_stations\n\t\n\t#station and time array\n\tinfo = [['',''] for k in range(len(predr_gmt)*len(stations))]\n\tfor i in range(len(predr_gmt)):\n\t\tfor j in range(len(stations)):\n\t\t\tk = i*len(stations)+j\n\t\t\tinfo[k][0]=predr_gmt[i]\n\t\t\tinfo[k][1]=stations[j]\n\n\t#create column names\n\tnames = ['']*(len(predictor)+len(predictand)+2)\n\tnames[0]='Time'\n\tnames[1]='Station'\n\n\t#creating array\n\tarr = np.zeros((len(stations)*len(predr_gmt),len(predictor)+len(predictand)))\n\t\n\t#adding predictor data\n\tfor i in range(len(predictor)):\n\t\t#remove lead time and forecast reference time from variable name\n\t\t#and add variable name to column list of final dataframe\n\t\tif lead_time == 12:\n\t\t\tnames[i+2]='GFS_'+predictor[i].get_variable_name()[:-11]\n\t\telse:\n\t\t\t names[i+2]='GFS_'+predictor[i].get_variable_name()[:-10]\n\n\t\t#create pandas dataframe of data and sort alphabetically by station name\n\t\tpredictor[i].data = np.squeeze(predictor[i].data,axis=2)\n\t\tpredictor[i].data = pd.DataFrame(predictor[i].data,columns=predr_stations,index=predr_gmt)\n\t\tpredictor[i].data = predictor[i].data.reindex(sorted(predictor[i].data.columns),axis=1)\n\t\t\n\t\t#remove stations with no predictand data\n\t\tk=0\n\t\ta=miss_stations[:]\n\t\tfor j in predictor[i].data.columns:\n\t\t\tif not a:\n\t\t\t\tbreak\n\t\t\tif j==a[k]:\n\t\t\t\tpredictor[i].data=predictor[i].data.drop(j,axis=1)\n\t\t\t\tdel a[k]\n\t\t\n\t\t#add data to final dataframe\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tarr[k][i] = predictor[i].data.iloc[b][c]\n\n\t#add predictand data\n\tfor i in range(len(predictand)):\n\t\t#removing extra underscore, adding 
variable name to column names\n\t\tnames[len(predictor)+2+i]='METAR_'+predictand[i].get_variable_name()[:-1]\n\t\n\t\t#resize array and create pandas dataframe\n\t\tpredictand[i].data = np.squeeze(predictand[i].data,axis=2)\n\t\tpredictand[i].data = pd.DataFrame(predictand[i].data,columns=predd_stations,index=predd_hours)\n\t\tpredictand[i].data = predictand[i].data.reindex(sorted(predictand[i].data.columns),axis=1)\n\t\t\n\t\t#remove extra days of predictand data\n\t\tpredictand[i].data = predictand[i].data.iloc[0:len(predr_time),:]\n\t\t\t\n\t\t#add predictand data to array\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tval = predictand[i].data.iloc[b][c]\n\t\t\t\t\n\t\t\t\t#catch metar fill data\n\t\t\t\tif val == 9999: \n\t\t\t\t\tval = np.nan\n\t\t\t\tarr[k][len(predictor)+i]=val\n\t\n\t#add station and time data to array and save as csv\n\tdata = np.concatenate([info,arr],axis = 1)\n\tto_save = pd.DataFrame(data,columns=names)\n\tto_save.to_csv(str(start)+'_'+str(end)+'_'+str(lead_time)+'hrs.csv')", "def substation_main(locator, total_demand, building_names, gv, Flag):\n\n t0 = time.clock()\n # generate empty vectors\n Ths = np.zeros(8760)\n Tww = np.zeros(8760)\n Tcs = np.zeros(8760) + 1E6\n\n # determine grid target temperatures at costumer side.\n iteration = 0\n buildings = []\n for name in building_names:\n buildings.append(pd.read_csv(locator.get_demand_results_folder() + '//' + name + \".csv\",\n usecols=['Name', 'Thsf_sup_C', 'Thsf_re_C', 'Tcsf_sup_C', 'Tcsf_re_C',\n 'Twwf_sup_C', 'Twwf_re_C', 'Qhsf_kWh', 'Qcsf_kWh', 'Qwwf_kWh',\n 'mcphsf_kWperC', 'mcpwwf_kWperC', 'mcpcsf_kWperC',\n 'Ef_kWh']))\n Ths = np.vectorize(calc_DH_supply)(Ths.copy(), buildings[iteration].Thsf_sup_C.values)\n Tww = np.vectorize(calc_DH_supply)(Tww.copy(), buildings[iteration].Twwf_sup_C.values)\n Tcs = np.vectorize(calc_DC_supply)(Tcs.copy(), buildings[iteration].Tcsf_sup_C.values)\n iteration += 1\n T_DHS = np.vectorize(calc_DH_supply)(Ths, Tww)\n T_DHS_supply = np.where(T_DHS > 0, T_DHS + gv.dT_heat, T_DHS)\n T_DCS_supply = np.where(Tcs != 1E6, Tcs - gv.dT_cool, 0)\n\n # Calculate disconnected buildings files and substation operation.\n if Flag:\n index = 0\n combi = [0] * len(building_names)\n for name in building_names:\n print name\n dfRes = total_demand[(total_demand.Name == name)]\n combi[index] = 1\n key = \"\".join(str(e) for e in combi)\n dfRes.to_csv(locator.get_optimization_substations_total_file(key), sep=',', float_format='%.3f')\n combi[index] = 0\n # calculate substation parameters per building\n substation_model(locator, gv, buildings[index], T_DHS, T_DHS_supply, T_DCS_supply, Ths, Tww)\n index += 1\n else:\n index =0\n # calculate substation parameters per building\n for name in building_names:\n substation_model(locator, gv, buildings[index], T_DHS, T_DHS_supply, T_DCS_supply, Ths, Tww)\n index += 1\n print time.clock() - t0, \"seconds process time for the Substation Routine \\n\"", "def measurements():\n measurements_for_displaying = db.session.query(Measurement).all()\n return render_template('measurement/measurements.html', measurements=measurements_for_displaying)", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n 
logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? \n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? 
\n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def load_station_table(txn):\n LOG.info(\"load_station_table called() ...\")\n txn.execute(\n \"SELECT id, ST_x(geom) as lon, ST_y(geom) as lat from stations \"\n \"where network in ('NEXRAD','TWDR')\"\n )\n for row in txn.fetchall():\n ST[row[\"id\"]] = {\"lat\": row[\"lat\"], \"lon\": row[\"lon\"]}\n LOG.info(\"Station Table size %s\", len(ST.keys()))", "def run(self):\n\n for table in self.TABLES:\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}_Work\")\n self.dictionary_conn.commit()\n self.logger.info(\"work tables cleared\")\n for id in self.ids:\n drug = self.Drug(self, id)\n if drug.wanted:\n drug.load()\n self.logger.info(\"work tables populated\")\n for table in self.TABLES:\n insert = f\"INSERT INTO {table} SELECT * FROM {table}_Work\"\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}\")\n self.dictionary_cursor.execute(insert)\n self.dictionary_conn.commit()\n self.logger.info(\"live tables ready\")", "def read_table_stations(self):\n if not os.path.exists(self.station_table_filename):\n LOGGER.warning('could not find station.table file \"%s\"', self.station_table_filename)\n return self.known_stations\n count = 0\n with open(self.station_table_filename, 'r') as textfile:\n lines = textfile.read().split(LF)\n for line in lines:\n station_id, data = read_table_station_from_line(line)\n if station_id is not None:\n self.known_stations[station_id] = data\n count += 1\n self.station_file_age = os.path.getmtime(self.station_table_filename)\n LOGGER.info(' Loaded %i station records from \"%s\"', count, self.station_table_filename)\n return self.known_stations", "def _extract_all(sel,\n rootpath=r'D:\\WorkStation_2018\\WorkStation_dynamicFC\\Workstation_dynamic_fc_baobaoComputer\\Data\\Dynamic',\n whichstate='state1'):\n\n # hc\n group_name = 'HC'\n data_hc = sel._extract_one(rootpath, whichstate, group_name)\n\n # mdd\n group_name = 'MDD'\n data_mdd = sel._extract_one(rootpath, whichstate, group_name)\n\n # bd\n group_name = 'BD'\n data_bd = sel._extract_one(rootpath, whichstate, group_name)\n\n # sz\n group_name = 'SZ'\n data_sz = 
sel._extract_one(rootpath, whichstate, group_name)\n\n # concat\n data_all = pd.concat([data_hc, data_mdd, data_bd, data_sz], axis=0)\n data_all.index = np.arange(0, np.shape(data_all)[0])\n\n return data_all", "def station_stats(df):", "def collect_data(default_path, elects_d, widths, lmbdas):\n cols = [\"elect\", \"d\", \"lmbda\", \\\n \"Dx\", \"Dy\", \"Dz\", \"Dxy\", \"Dyz\", \"Dxz\", \"D3d\"]\n index = np.arange(len(lmbdas) * len(widths) * len(elects_d))\n df = DataFrame(index=index, columns=cols)\n\n cnt = 0\n for el in sorted(elects_d.keys()):\n for d in widths:\n for l in lmbdas:\n data = [elects_d[el], d, l]\n fname = \"diffusivity_%s_d%i_l%i.log\" % (el, d, l)\n fpath = default_path + fname\n try:\n f = open(fpath, \"r\").readlines()\n for line in f:\n if \"1d in SI\" in line:\n data.extend(np.array(line.split()[3:]).astype(float))\n if \"2d in SI\" in line:\n data.extend(np.array(line.split()[3:]).astype(float))\n if \"3d in SI\" in line:\n data.extend(np.array(line.split()[3:]).astype(float))\n except FileNotFoundError:\n print(\"File not found: %s.\" % fpath)\n\n if len(data) == 10:\n df.loc[cnt] = data\n cnt += 1\n return df", "def write_to_vacancies_metro_stations_table(config, tables_cache):\r\n database = deepcopy(config[\"database\"])\r\n vacancies_metro_stations_table = \\\r\n deepcopy(config[\"tables\"][\"vacancies_metro_stations_table\"])\r\n in_tests.test_write_to_database_from_dict(\r\n database, vacancies_metro_stations_table, tables_cache)\r\n\r\n vacancy_id = tables_cache[\"id\"]\r\n station_id = tables_cache[\"address_metro_stations_station_id\"]\r\n\r\n if station_id:\r\n write_to_database(database, vacancies_metro_stations_table, {\r\n \"vacancy_id\": vacancy_id,\r\n \"metro_station_id\": station_id\r\n })\r\n return ()", "def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()", "def fill_obs(self, observation_table, data_store):\n for obs in observation_table:\n events = data_store.obs(obs_id=obs['OBS_ID']).events\n\n # TODO: filter out (mask) possible sources in the data\n # for now, the observation table should not contain any\n # run at or near an existing source\n\n self.counts_cube.fill_events([events])\n self.livetime_cube.data += events.observation_live_time_duration", "def _setupWeather(self, w, config):\n wnames = ('cloud', 'seeing')\n if w not in wnames:\n raise Exception('w should be one of %s' %(wnames))\n filename = config['%s_datafile' %(w)]\n file = open(filename, 'r')\n # Also assume flat file contains only date / value in a space or tab separated file. \n self.dates[w] = []\n self.weather[w] = []\n # Read the data file.\n print '# Reading weather data file %s' %(filename)\n for line in file:\n if line.startswith('#') | line.startswith('!'):\n continue\n self.dates[w].append(line.split()[0])\n self.weather[w].append(line.split()[1])\n file.close()\n self.dates[w] = numpy.array(self.dates[w], float)\n self.weather[w] = numpy.array(self.weather[w], float)\n # Check the total amount of data (mostly for user awareness):\n print '# Read %d weather values from %s file. ' %(len(self.weather[w]), filename)\n # Check that weather data is monotonically increasing in time. 
\n if not(numpy.all(numpy.diff(self.dates[w]))):\n order = self.dates[w].argsort()\n self.weather[w] = self.weather[w][order]\n self.dates[w] = self.dates[w][order]\n # Get the total length of time included in this (seeing/cloud) file,\n # so that we can determine a wrap-around date if we need that.\n self.maxtime[w] = self.dates[w].max()\n return", "def collect_stats(self):\n\n df_avg, self.transport_df, self.customer_df, self.manager_df, self.station_df = self.get_stats_dataframes()\n\n columns = []\n if self.config.simulation_name:\n df_avg[\"Simulation Name\"] = self.config.simulation_name\n columns = [\"Simulation Name\"]\n columns += [\"Avg Waiting Time\", \"Avg Total Time\", \"Simulation Time\"]\n if self.config.max_time:\n df_avg[\"Max Time\"] = self.config.max_time\n columns += [\"Max Time\"]\n columns += [\"Simulation Finished\"]\n self.df_avg = df_avg[columns]", "def get_basic_data(self):\n\n db = DataBase().clear_table()\n\n data = self.scraper.scrape_top_250()\n for d in data:\n title = d.find(\"td\", class_=\"titleColumn\")\n title = title.find(\"a\")\n title = re.sub(\"<.*?>\", \"\", str(title))\n\n film_id = d.find(\"td\", class_=\"watchlistColumn\")\n film_id = film_id.find(\"div\")\n film_id = film_id[\"data-tconst\"]\n\n year = d.find(\"span\", class_=\"secondaryInfo\")\n year = re.sub(\"<.*?>\", \"\", str(year)).replace(\"(\", \"\").replace(\")\", \"\")\n\n director = d.find(\"td\", class_=\"titleColumn\")\n director = director.find(\"a\")\n director = director[\"title\"]\n director, *cast = director.split(\", \")\n director = director.replace(\" (dir.)\", \"\")\n\n rating = d.find(\"td\", class_=\"ratingColumn imdbRating\")\n rating = rating.find(\"strong\")\n rating = re.sub(\"<.*?>\", \"\", str(rating))\n\n poster = d.find(\"td\", class_=\"posterColumn\")\n poster = poster.find(\"img\")[\"src\"]\n poster = re.sub(\"@.+\", \"@._V1_FMjpg_UY474_.jpg\", poster)\n\n DataBase().populate_table(\n (title, film_id, year, director, \", \".join(cast), rating, poster)\n )", "def update(self):\n if self.last_update and (\n self.last_update + timedelta(hours=1)\n > datetime.utcnow().replace(tzinfo=dt_util.UTC)\n ):\n return # Not time to update yet; data is only hourly\n\n for row in self.current_observations():\n if row.get(\"Station\") == self._station_id:\n api_fields = {\n col_heading: (standard_name, dtype)\n for standard_name, (\n _,\n _,\n _,\n col_heading,\n dtype,\n ) in SENSOR_TYPES.items()\n }\n self.data = {\n api_fields.get(col_heading)[0]: api_fields.get(col_heading)[1](\n v.replace(\",\", \".\")\n )\n for col_heading, v in row.items()\n if col_heading in api_fields and v\n }\n break\n else:\n raise ValueError(f\"No weather data for station {self._station_id}\")", "def asdf_create(asdf_name, wav_dirs, sta_dir):\n\n with pyasdf.ASDFDataSet(asdf_name) as ds:\n wav_files = []\n for wav_dir in wav_dirs:\n wav_files.extend([os.path.join(root, a_file)\n for root, dirs, files in os.walk(wav_dir)\n for a_file in files])\n for _i, filename in enumerate(wav_files):\n print(\"Adding mseed file %i of %i...\" % (_i+1, len(wav_files)))\n st = read(filename)\n #Add waveforms\n ds.add_waveforms(st, tag=\"raw_recording\")\n sta_files = glob('%s/*' % sta_dir)\n for filename in sta_files:\n ds.add_stationxml(filename)\n return", "def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, 
exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()", "def populated_archivist_dataset(archivist_dataset, tmp_path_factory):\n wpath = tmp_path_factory.mktemp(\"archivistds\")\n\n ads = archivist_dataset\n\n dscontent = (\n ('azip/file1.txt', 'zipfile1'),\n ('azip/file2.csv', 'zipfile2_muchcontent'),\n ('atar/file1.txt', 'tarfile1'),\n ('atar/file2.csv', 'tarfile2_muchcontent'),\n )\n srcds = Dataset(wpath / 'srcds').create(**nonoise)\n for fpath, fcontent in dscontent:\n fpath = srcds.pathobj / (PurePosixPath(fpath))\n fpath.parent.mkdir(parents=True, exist_ok=True)\n fpath.write_text(fcontent)\n srcds.save(**nonoise)\n\n archive_root = wpath / 'myarchive'\n #archivetype = 'zip'\n\n akeys = {}\n\n # no ZIP just yet\n # for archivetype, ext in (('zip', ''), ('tar', '.gz')):\n for archivetype, ext in (('tar', '.gz'), ):\n archive_path = Path(f\"{archive_root}.{archivetype}{ext}\")\n\n archive_path_inds = ads.pathobj / '.archives' / archive_path.name\n # create an archive, the easy way, by simply exporting the\n # entire dataset worktree\n srcds.export_archive(archive_root, archivetype=archivetype,\n **nonoise)\n assert archive_path.exists()\n\n # add the archive (in a hidden dir) to be able to reference\n # it via a key\n aurl = archive_path.as_uri()\n ads.repo.call_annex([\n 'addurl', '--file', str(archive_path_inds), aurl])\n ads.save(**nonoise)\n # get the key of the archive\n akeys[archivetype] = ads.status(\n archive_path_inds, annex='basic', return_type='item-or-list',\n **nonoise)['key']\n return ads, akeys, archive_root, dscontent", "def analyze_all(datadir, TPQI_starts, dataruns, save = 1, lower = 38.4):\n dirs = os.listdir(datadir)\n idx = 0\n right_dirs = list()\n\n\n for l in dataruns:\n for k in arange(len(dirs)):\n mark_right = '_interference_'+num2str(l,0) in dirs[k]\n \n if mark_right and (len(dirs[k]) > len('_interference_'+num2str(l,0))+6):\n mark_right = False\n\n if mark_right:\n right_dirs.append(dirs[k])\n idx += 1\n continue\n\n \n if len(right_dirs) == 0:\n print 'Did not find any files'\n\n if len(dataruns) == len(right_dirs):\n print 'Found all files...'\n else:\n print 'Beware, not all files are taken into account, file(s) missing.'\n \n tail_over_time = zeros(len(right_dirs))\n tpqi_starts = TPQI_starts[dataruns]\n statistics_info = zeros([len(right_dirs),4])\n \n for k in arange(len(right_dirs)):\n tail_over_time[k] = tail_cts_per_shot(datapath = datadir+'\\\\'+right_dirs[k], lower = lower, TPQI_starts = tpqi_starts[k], save = save)\n statistics_info[k,:] = analyze_thresholds(datapath = datadir+'\\\\'+right_dirs[k], threshold_lt1 = 0, threshold_lt2 = 9, normalize = True, save = save)\n\n\n os.chdir(datadir)\n percentage_finished = float(k+1)/len(right_dirs)*100\n print 'finished: 
'+num2str(percentage_finished,0)+'%'\n\n\n if save:\n times_passed_overall_lt1 = statistics_info[:,0]\n times_passed_after_seq_lt1 = statistics_info[:,1]\n times_passed_overall_lt2 = statistics_info[:,2]\n times_passed_after_seq_lt2 = statistics_info[:,3]\n filename = 'statistics_run_'+num2str(dataruns.min(),0)+'_to_'+num2str(dataruns.max(),0)+'.npz' \n savez(filename, tpqi_starts = tpqi_starts, tail_over_time = tail_over_time,\n times_passed_overall_lt1 = times_passed_overall_lt1, \n times_passed_after_seq_lt1 = times_passed_after_seq_lt1, \n times_passed_overall_lt2 = times_passed_overall_lt2,\n times_passed_after_seq_lt2 = times_passed_after_seq_lt2)\n\n \n\n figure3 = plt.figure(figsize=(12.0, 16.0))\n plt.subplot(211)\n plt.plot(dataruns,tail_over_time*1E4, '-k')\n plt.xlabel('TPQI run number')\n plt.ylabel('Tail counts per shot (x 1E-4)')\n plt.grid()\n plt.ylim([0,1.1*max(tail_over_time*1E4)])\n\n plt.subplot(212)\n plt.plot(dataruns,TPQI_starts[0:len(right_dirs)], '-k')\n plt.xlabel('TPQI run number')\n plt.ylabel('TPQI starts per run')\n plt.grid()\n plt.ylim([0, 1.1*TPQI_starts[0:len(right_dirs)].max()])\n if save:\n figure3.savefig('tpqi_starts_and_tail_over_time.png')", "def write_all_data_tables( phasename, eos_prop_d, output_d ):\n\n dataio.write_data_table( 'temperature_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'Tmesh_a')),\n ('GPa', 'eV', 1), output_d )\n\n dataio.write_data_table( 'density_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'rhomesh_a')),\n ('GPa', 'eV','g_cc'), output_d )\n dataio.write_data_table( 'heat_capacity_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'Cpmesh_a')),\n ('GPa','eV','eV'), output_d )\n\n dataio.write_data_table( 'thermal_exp_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'alphamesh_a')),\n ('GPa','eV',1), output_d )\n\n dataio.write_data_table( 'adiabat_temp_grad_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'dTdP_Smesh_a')),\n ('GPa','eV','GPa-1'), output_d )\n pass", "def prepare_input(self, only_center = True):\n \n if only_center:\n nx = [0]\n ny = [0]\n else:\n nx = [0,1,-1]\n ny = [0,1,-1]\n gauge = dd.read_csv(str(Path(self.db_location, 'gauge', '*.csv.gz')), \n compression='gzip', \n assume_missing=True,\n dtype = {'TIMESTAMP':int, 'STATION': str})\n \n gauge = gauge.compute().drop_duplicates()\n gauge = gauge.replace(-9999,np.nan)\n for x in nx:\n for y in ny:\n logging.info('Processing neighbour {:d}{:d}'.format(x, y))\n radar = dd.read_parquet(str(Path(self.db_location, 'radar',\n '*.parquet')))\n refer = dd.read_parquet(str(Path(self.db_location, 'reference', \n '*.parquet')))\n \n # Select only required pixel\n radar = radar.loc[np.logical_and(radar['NX'] == x, \n radar['NY'] == y)]\n refer = refer.loc[np.logical_and(refer['NX'] == x, \n refer['NY'] == y)]\n \n # Convert to pandas and remove duplicates \n radar = radar.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION',\n 'RADAR',\n 'NX','NY',\n 'SWEEP'])\n \n refer = refer.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION'])\n \n radar = radar.sort_values(by = ['TIMESTAMP','STATION','SWEEP'])\n refer = refer.sort_values(by = ['TIMESTAMP','STATION'])\n gauge = gauge.sort_values(by = ['TIMESTAMP','STATION'])\n # Get only valid precip data\n gauge = gauge[np.isfinite(gauge['RRE150Z0'])]\n \n # Create individual 10 min - station stamps\n gauge['s-tstamp'] = np.array(gauge['STATION'] + \n 
gauge['TIMESTAMP'].astype(str)).astype(str)\n radar['s-tstamp'] = np.array(radar['STATION'] + \n radar['TIMESTAMP'].astype(str)).astype(str)\n refer['s-tstamp'] = np.array(refer['STATION'] + \n refer['TIMESTAMP'].astype(str)).astype(str)\n \n # Get gauge and reference only when radar data available\n \n # Find timestamps that are in the three datasets\n ststamp_common = np.array(pd.Series(list(set(gauge['s-tstamp'])\n .intersection(set(refer['s-tstamp'])))))\n ststamp_common = np.array(pd.Series(list(set(radar['s-tstamp'])\n .intersection(set(ststamp_common)))))\n radar = radar.loc[radar['s-tstamp'].isin(ststamp_common)]\n gauge = gauge.loc[gauge['s-tstamp'].isin(ststamp_common)]\n refer = refer.loc[refer['s-tstamp'].isin(ststamp_common)]\n \n \n # Filter incomplete hours\n stahour = np.array(gauge['STATION'] + \n ((gauge['TIMESTAMP'] - 600 ) - \n (gauge['TIMESTAMP'] - 600 ) % 3600).astype(str)).astype(str)\n \n full_hours = np.array(gauge.groupby(stahour)['STATION']\n .transform('count') == 6)\n \n refer = refer.reindex[full_hours]\n gauge = gauge.reindex[full_hours] \n radar = radar.reindex[radar['s-tstamp'].\n isin(np.array(gauge['s-tstamp']))]\n \n stahour = stahour[full_hours]\n \n # Creating vertical grouping index\n \n _, idx, grp_vertical = np.unique(radar['s-tstamp'],\n return_inverse = True,\n return_index = True)\n # Get original order\n sta_tstamp_unique = radar['s-tstamp'][np.sort(idx)]\n # Preserves order and avoids sorting radar_statstamp\n grp_vertical = idx[grp_vertical]\n # However one issue is that the indexes are not starting from zero with increment\n # of one, though they are sorted, they are like 0,7,7,7,15,15,23,23\n # We want them starting from zero with step of one\n grp_vertical = rankdata(grp_vertical,method='dense') - 1\n \n # Repeat operation with gauge hours\n sta_hourly_unique, idx, grp_hourly = np.unique(stahour, \n return_inverse = True,\n return_index = True)\n grp_hourly = idx[grp_hourly]\n \n # Add derived variables height iso0 (HISO) and height above ground (HAG)\n # Radar\n stations = constants.METSTATIONS\n cols = list(stations.columns)\n cols[1] = 'STATION'\n stations.columns = cols\n radar = pd.merge(radar,stations, how = 'left', on = 'STATION',\n sort = False)\n \n radar['HISO'] = -radar['T'] / constants.LAPSE_RATE * 100\n radar['HAG'] = radar['HEIGHT'] - radar['Z']\n radar['HAG'][radar['HAG'] < 0] = 0\n \n # Gauge\n gauge['minutes'] = (gauge['TIMESTAMP'] % 3600)/60\n \n # Save all to file\n refer.to_parquet(str(Path(self.input_location, \n 'reference_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n radar.to_parquet(str(Path(self.input_location, \n 'radar_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n grp_idx = {}\n grp_idx['grp_vertical'] = grp_vertical\n grp_idx['grp_hourly'] = grp_hourly\n grp_idx['tstamp_unique'] = sta_tstamp_unique\n \n pickle.dump(grp_idx, \n open(str(Path(self.input_location, \n 'grouping_idx_x{:d}y{:d}.p'.format(x,y))),'wb'))\n \n if x == 0 and y == 0:\n # Save only gauge for center pixel since it's available only there\n gauge.to_parquet(str(Path(self.input_location, 'gauge.parquet')),\n compression = 'gzip', index = False)", "def importAllDatasets(directory):\n head_index = findIndex(temp_list, \"Gaze\")\n point_index = findIndex(temp_list, \"Point\")\n grab_index = findIndex(temp_list, \"Grab\")\n pos_index = findIndex(temp_list, \"Position\")\n\n head_data = pd.read_csv(temp_list[head_index]) if head_index != None else None\n point_data = 
pd.read_csv(temp_list[point_index]) if point_index != None else None\n grab_data = pd.read_csv(temp_list[grab_index]) if grab_index != None else None\n pos_data = pd.read_csv(temp_list[pos_index]) if pos_index != None else None\n\n\n return head_data, point_data, grab_data, pos_data", "def stations():\n # Query all station names from dataset\n station_list = session.query(Measurement.station).distinct().all()\n all_stations = list(np.ravel(station_list))\n\n return jsonify(all_stations)", "def prepare_for_influxdb(df):\n df = df.drop(columns=\"landkreis\", errors=\"ignore\") # prevent name collision in get_ags()\n df = get_ags(df)\n df[\"time\"] = df.apply(lambda x: 1000000000*int(datetime.timestamp((pd.to_datetime(x[\"timestamp\"])))), 1)\n df[\"measurement\"] = \"hystreet\"\n df[\"origin\"] = \"https://hystreet.com\"\n df = df.rename(columns={\n 'station_id': '_id',\n 'pedestrians_count': 'pedestrian_count',\n 'state': 'bundesland'\n })\n df['ags'] = pd.to_numeric(df['ags'])\n # import pdb; pdb.set_trace()\n return df", "def assemble(self, dt_range=None):\n if dt_range is not None:\n self.dt_list = trace_source.time_list(dt_range[0],\n dt_range[1],\n self.config['time']['step'])\n\n # only for the testcase\n traj_dir = self.config['partposit_dir']\n days_avail = os.listdir(traj_dir)\n # filter only for the trajectory files with tdump extension\n days_avail = [f for f in days_avail if len(f) == 11]\n print(days_avail)\n folders = [f for f in days_avail if datetime.datetime.strptime(f, \"%Y%m%d_%H\") in self.dt_list]\n\n assert len(folders) > 0, 'no folders with flexpart partposit data'\n\n # the defaultdict is used here to sort the files by datetime within a dictionary\n # filtered_files = defaultdict(list)\n # for f in files:\n # # regex the yyyymmdd-hh timestamp in the filename\n # dt = datetime.datetime.strptime(re.search('([0-9]{8})-([0-9]){2}', f).group(0), '%Y%m%d-%H')\n # height = float(re.search('([0-9]{3,6})(?=_0[0-9-]{1,4}.tdump)', f).group(0))\n # #print(f, dt, height)\n # if dt >= self.dt_list[0] and dt <= self.dt_list[-1]:\n # filtered_files[dt].append((f,height))\n\n # here an empty dict is generated with a zero containing array\n self.stat2d_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list))))\n\n self.statls_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list), 7)))\n\n self.raw_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list),\n abs(self.config['time']['tr_duration'])+1)))\n\n # TODO make more than 7 geo names possible\n ng = trace_source.land_sfc.named_geography(self.config['geonames'])\n self.geo_names = ng.geo_names\n no_geo_names = len(list(self.geo_names.keys()))\n self.statgn_dict = defaultdict(lambda: np.zeros((len(self.dt_list),\n len(self.height_list),\n no_geo_names)))\n\n\n self.lat_names = {0: '<-60', 1: '-60..-30', 2:'-30..0', 3: '0..30', 4: '30..60', 5: '>60'}\n self.statlat_dict = defaultdict(lambda: np.zeros((len(self.dt_list),\n len(self.height_list),\n len(list(self.lat_names.keys())))))\n\n\n ls = trace_source.land_sfc.land_sfc()\n self.ls_categories = ls.categories\n\n\n for it, dt in enumerate(self.dt_list[:]):\n print('trajectories eding at ', dt)\n files_for_time = os.listdir(traj_dir + dt.strftime(\"%Y%m%d_%H\"))\n files_for_time = sorted([f for f in files_for_time if \"partposit_\" in f])\n folder = traj_dir + dt.strftime(\"%Y%m%d_%H\") + \"/\"\n print('files_for_time ', files_for_time)\n\n print('heights ', len(self.height_list), self.height_list)\n\n flex_stat = 
[flex_statistics(self.config, ls=ls, ng=ng) for h in self.height_list]\n traj_meta = read_flexpart_traj_meta(folder + \"trajectories.txt\")\n\n self.no_part.append(traj_meta['releases_meta'][1]['no_particles'])\n self.time_res.append(10*24/len(files_for_time))\n\n # different structure than hysplit\n # 1. loop through the ending times of the current day\n # 2. load partposit for a specified time\n # 3. loop through heights\n\n for f in files_for_time:\n print('files_for_time ', f)\n part_pos = read_partpositions(folder + f, 1, ctable=True)\n part_pos = np.array(part_pos)\n\n for ih, h in enumerate(self.height_list):\n #print(\"at \", ih, h)\n this_population = np.where(part_pos[:,0] == ih+1)[0]\n #release_sel = np.array([list(p) for p in part_pos if p[0]==ih+1])\n release_sel = part_pos[this_population, :]\n #assert np.all(release_sel == other_release)\n meta = traj_meta['releases_meta'][ih+1]\n #print(meta)\n assert np.mean(meta['heights']) == h, f\"{meta['heights']} {h} do not fit\"\n flex_stat[ih].add_partposits_gn(release_sel)\n\n flex_stat[ih].add_partposits_ls(release_sel)\n flex_stat[ih].add_partposits_thres(release_sel)\n\n # now assemble the statistics for all heights\n for ih, h in enumerate(self.height_list): \n flex_stat[ih].calc_gn_stat()\n for k in list(flex_stat[ih].stat_gn.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_gn[k].no_below\n print('stat gn ', h, k, flex_stat[ih].stat_gn[k])\n self.statgn_dict[k][it, ih] = list(flex_stat[ih].stat_gn[k].counter.values())\n\n flex_stat[ih].calc_ls_stat()\n for k in list(flex_stat[ih].stat_ls.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_ls[k].no_below\n print('stat ls ', h, k, flex_stat[ih].stat_ls[k])\n self.statls_dict[k][it, ih] = list(flex_stat[ih].stat_ls[k].counter.values())\n\n flex_stat[ih].calc_thres_stat()\n for k in list(flex_stat[ih].stat_lat.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_lat[k].no_below\n print('stat_lat ', h, k, flex_stat[ih].stat_lat[k])\n self.statlat_dict[k][it, ih] = list(flex_stat[ih].stat_lat[k].counter.values())\n\n\n # #assert len(f_list) > 1\n # for ih, f in enumerate(f_list):\n # print(it, ih, f[1], dt)\n # traj = trajectory(self.config)\n # traj.load_file(traj_dir+f[0], silent=True)\n # savepath = '{}/{}'.format(self.config['plot_dir'], dt.strftime('%Y%m%d'))\n\n\n # if \"timeinterval\" in self.config['plotmap']:\n # timeinterval = self.config['plotmap']['timeinterval']\n # else:\n # timeinterval = 12\n # if \"heights\" in self.config['plotmap']:\n # heightlist = self.config['plotmap']['heights']\n # else:\n # heightlist = [1500.0, 3000.0, 4500.0]\n # #if f[1] == 3000.0 and dt.hour % 12 == 0:\n # if f[1] in heightlist and dt.hour % timeinterval == 0:\n # print(\"plotting \", f[1], dt.hour)\n # plot_trajectories_ens(traj, savepath, ls=ls, config=self.config)\n # #continue\n\n # traj.evaluate(silent=True)\n # traj.add_land_sfc(ls, silent=True)\n # traj.add_ensemble_land_sfc(ls)\n # traj.add_ensemble_geo_names(ng)\n # #traj.add_area_land_sfc('md', ls, silent=True)\n # #traj.add_area_land_sfc(2000, ls, silent=True)\n\n # #print(\"at step\", it, dt, ih, f)\n # #print('keys ', traj.statistics.keys())\n # # now the empty dict is filled with the keys (and values) of the statistics dict from traj\n # for k in list(traj.statistics.keys()):\n # self.stat2d_dict[k][it, ih] = traj.statistics[k]\n # # subset of trajectory data to collect\n # param_collect = ['latitude', 'longitude', 'height', \"PRESSURE\", \"AIR_TEMP\",\n # 
\"RAINFALL\", \"RELHUMID\", \"TERR_MSL\", 'age']\n # if 'land_sfc_category' in list(traj.data.keys()):\n # param_collect.append('land_sfc_category')\n # for k in param_collect:\n # #self.raw_dict[k][it, ih, :traj.data[1][k].shape[0]] = traj.data[1][k]\n # self.raw_dict[k][it, ih, :] = traj.data[1][k]\n # #self.raw_dict[k][it, ih, traj.data[1][k].shape[0]:] = -999.\n\n # for k in list(traj.stat_ls.keys()):\n # self.stat2d_dict[k+'_no_below'][it, ih] = traj.stat_ls[k].no_below\n # print('stat ls ', k, traj.stat_ls[k])\n # self.statls_dict[k][it, ih] = list(traj.stat_ls[k].counter.values())\n\n # for k in list(traj.stat_gn.keys()):\n # self.stat2d_dict[k+'_no_below'][it, ih] = traj.stat_gn[k].no_below\n # print('stat gn ', k, traj.stat_gn[k])\n # self.statgn_dict[k][it, ih] = list(traj.stat_gn[k].counter.values())\n\n # trying to free memory\n del ls\n del ng", "def summarise_data(trip_in, station_data, trip_out):\n # generate dictionary of station - city mapping\n station_map = create_station_mapping(station_data)\n \n with open(trip_out, 'w') as f_out:\n # set up csv writer object \n out_colnames = ['duration', 'start_date', 'start_year',\n 'start_month', 'start_hour', 'weekday',\n 'start_city', 'end_city', 'subscription_type'] \n trip_writer = csv.DictWriter(f_out, fieldnames = out_colnames)\n trip_writer.writeheader()\n \n for data_file in trip_in:\n with open(data_file, 'r') as f_in:\n # set up csv reader object\n trip_reader = csv.DictReader(f_in)\n\n # collect data from and process each row\n for row in trip_reader:\n new_point = {}\n \n # convert duration units from seconds to minutes\n ### Question 3a: Add a mathematical operation below ###\n ### to convert durations from seconds to minutes. ###\n new_point['duration'] = float(row['Duration'])/60\n \n # reformat datestrings into multiple columns\n ### Question 3b: Fill in the blanks below to generate ###\n ### the expected time values. 
###\n trip_date = datetime.strptime(row['Start Date'], '%m/%d/%Y %H:%M')\n new_point['start_date'] = trip_date.strftime('%Y-%m-%d')\n new_point['start_year'] = trip_date.strftime('%Y') # or : trip_date.year\n new_point['start_month'] = trip_date.strftime('%m') # or : trip_date.month\n new_point['start_hour'] = trip_date.strftime('%H') # or : trip_date.hour\n new_point['weekday'] = trip_date.strftime('%a') # or : trip_date.weekday() OR trip_date.isoweekday()\n\n \n # remap start and end terminal with start and end city\n new_point['start_city'] = station_map[row['Start Terminal']]\n new_point['end_city'] = station_map[row['End Terminal']]\n # two different column names for subscribers depending on file\n if 'Subscription Type' in row:\n new_point['subscription_type'] = row['Subscription Type']\n else:\n new_point['subscription_type'] = row['Subscriber Type']\n\n # write the processed information to the output file.\n trip_writer.writerow(new_point)", "def stations():\n \n session = Session(engine)\n # Query to bring all stations\n results = pd.DataFrame(session.query(S.id.label('ID'),S.station.label('Station'),S.name.label('Name'),\\\n S.latitude.label('Latitude'),S.longitude.label('Longitude'), \\\n S.elevation.label('Elevation')).all())\n \n session.close()\n \n # Create a dictionary from the row data of the dataframe and return it as a JSON\n return jsonify(results.to_dict(orient = 'records'))", "def run():\r\n\r\n # Build list of stations\r\n stations = build_station_list()\r\n\r\n # Find 5 stations at which the current level is the highest\r\n stations_highest_rel_level_list = []\r\n N = 5\r\n for i in range(len(stations_highest_rel_level(stations, N))):\r\n stations_highest_rel_level_list.append(stations_highest_rel_level(stations, N)[i][0])\r\n \r\n\r\n # Plot the water level for each of these stations over the past 10 days\r\n \r\n # First fetch the time history for a station\r\n for station in stations:\r\n if station.name in stations_highest_rel_level_list:\r\n \r\n dt = 2\r\n dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt))\r\n # This gives list of dates and levels to be passed into a plot\r\n plot_water_level_with_fit(station, dates, levels, 4)\r\n else:\r\n pass", "def create_databases():\n db_connection = connect_to_db()\n\n # Create database tables.\n create_tables(db_connection)\n\n # Populate water tables.\n populate_water_tables(db_connection)\n\n # station_data = get_station_data()\n # station = station_data.query('ŠIFRA == 30301')\n # print(station)\n # index = station.index[0]\n # lat = station.at[index, 'LAT']\n # lng = station.at[index, 'LON']\n # name = f\"{station.at[index, 'VODOMERNA POSTAJA']} ({station.at[index, 'VODOTOK']})\"\n # print(index, lat, lng, name)\n\n # Populate location tables\n # populate_locations(db_connection)\n\n # Populate weather tables\n populate_weather(db_connection)\n\n db_connection.commit()\n db_connection.close()", "def save_data(self, data_dict: dict, table_name: str):\n location = \"Winnipeg, MB\"\n insert_sql = f\"\"\"insert or ignore into {table_name}\n (sample_date, location, min_temp, max_temp, avg_temp)\n values\n (?,?,?,?,?)\"\"\"\n\n with DBOperations(self.name) as dbcm:\n for date, temps in data_dict.items():\n data_tuple = (date, location, temps['Min'] ,temps['Max'], temps['Mean'])\n dbcm.execute(insert_sql, data_tuple)", "def water_supply(osm_path): \n i = retrieve(osm_path,'multipolygons',['man_made'],**{'man_made':[\"='water_tower' or \",\"='water_well' or \",\"='reservoir_covered' 
or \",\"='water_works'\"]})\n j = retrieve(osm_path,'multipolygons',['landuse'],**{'landuse':[\"='reservoir'\"]})\n \n i = i.rename(columns={'man_made': 'asset'})\n j = j.rename(columns={'landuse': 'asset'})\n \n combined_df = pandas.concat([i,j], ignore_index=True, sort=False) #append objects while ignoring that they may have overlapping index\n if combined_df.empty == True:\n return combined_df\n else:\n return combined_df[[\"osm_id\",\"asset\",\"geometry\"]]", "def produce_all_database(is_debug):\n\tproduce_database([\"apnea-ecg\", \"train\"], is_debug)\n\tproduce_database([\"apnea-ecg\", \"test\"], is_debug)", "def _interpolate_meteorological_data(dset, data, rundate):\n rundate = datetime(rundate.year, rundate.month, rundate.day)\n for field, station in [(f, f[4:]) for f in data.keys() if f.startswith(\"met_\")]:\n log.debug(f\"Meteorological data available for station {station}\")\n\n met_time = data[field].pop(\"met_time\")\n flat_list = [item for sublist in met_time for item in sublist]\n met_time_float = np.array([(flat_list[i] - rundate).total_seconds() for i in range(0, len(flat_list))])\n met_time_unique, met_index = np.unique(met_time_float, return_index=True)\n\n diff = len(met_time_float) - len(met_time_unique)\n if diff > 0:\n log.dev(f\"Removed duplicate met data for station {station}\")\n log.dev(\"Do this for the actual obs data also!\")\n if len(met_time_unique) == 1:\n for met_type in data[field].keys():\n data[field][met_type] = np.repeat(data[field][met_type][0], dset.num_obs)\n continue\n\n # Extrapolation one month before/after\n # (this is overkill, most of these values will be removed later when taking the diagonal)\n min_time = min(met_time_unique) - 31 * 86400\n max_time = max(met_time_unique) + 31 * 86400\n met_time_unique = np.hstack((np.array(min_time), met_time_unique, np.array(max_time)))\n\n for met_type in data[field].keys():\n met_data_array = data[field][met_type]\n flat_list = [item for sublist in met_data_array for item in sublist]\n met_data_array = np.array([flat_list[i] for i in met_index])\n met_data_array = np.hstack((met_data_array[0], met_data_array, met_data_array[-1]))\n data[field][met_type] = interpolation.interpolate(\n met_time_unique, met_data_array, dset.obs_time, kind=\"cubic\"\n )\n\n return data", "def fillstation(self, stanames, all=None, plot=None, summary=None, From=None, To=None, by=None,\n how='mean', variables=None, distance=None, sort_cor=True, constant=True, cor_lim=None):\n\n if all == True:\n stations = self.network.getsta([], all=True).values()\n else:\n stations = self.network.getsta(stanames)\n\n for station in stations:\n staname = station.getpara('stanames')\n\n if variables == None:\n newdataframe = station.getData(reindex=True, From=From, To=To, by=by,\n how=how) # Dataframe which stock the new data of the stations\n newdataframe['U m/s'] = station.getData('U m/s', reindex=True, From=From, To=To, by=by, how=how)\n newdataframe['V m/s'] = station.getData('V m/s', reindex=True, From=From, To=To, by=by, how=how)\n newdataframe['Ua g/kg'] = station.getData('Ua g/kg', reindex=True, From=From, To=To, by=by, how=how)\n newdataframe['Theta C'] = station.getData('Theta C', reindex=True, From=From, To=To, by=by, how=how)\n variables_name = newdataframe.columns\n else:\n newdataframe = station.getData(var=variables, reindex=True, From=From, To=To, by=by,\n how=how) # Dataframe which stock the new data of the stations\n variables_name = variables\n # select and sort nearest stations\n selections, selectionsnames = 
self.__getpredictors_distance(staname, distance)\n\n for var in variables_name:\n print(\"I\" * 30)\n print(\"variable -> \" + var)\n\n try:\n selections, params = self.__sort_predictors_by_corr(station, selections, var, From, To, by, how,\n constant=constant,\n selectionsnames=selectionsnames,\n sort_cor=sort_cor, cor_lim=cor_lim)\n\n selections_iter = iter(selections)\n params_iter = iter(params)\n # print newdataframe\n idxmissing = newdataframe[var][\n newdataframe[var].isnull() == True].index # slect where their is missing data\n\n while len(idxmissing) > 0:\n print(\"Their is [\" + str(len(idxmissing)) + \"] events missing\")\n\n try: # Try if their is still other stations to fill with\n selection = selections_iter.next()\n param = params_iter.next()\n except StopIteration:\n print(\"NO MORE SELECTED STATIONS\")\n break\n\n try:\n Y = station.getData(var, From=From, To=To, by=by, how=how) # variable to be filled\n X1 = selection[0].getData(var, From=From, To=To, by=by,\n how=how) # stations variable used to fill\n X2 = selection[1].getData(var, From=From, To=To, by=by,\n how=how) # stations variable used to fill\n\n select = pd.concat([X1, X2], keys=['X1', 'X2'], axis=1, join='inner').dropna()\n\n if constant:\n newdata = param[0] + param[1] * select['X1'] + param[2] * select[\n 'X2'] # reconstruct the data\n else:\n newdata = param[0] * select['X1'] + param[1] * select['X2'] # reconstruct the data\n\n newdataframe.loc[idxmissing, var] = newdata.loc[idxmissing, var]\n idxmissing = newdataframe[var][\n newdataframe[var].isnull() == True].index # slect where their is missing data\n\n\n except KeyError:\n print(\"&\" * 60)\n print('Selected stations did not fill any events')\n except ValueError:\n print('The variable ' + var + \"Does not exist or no data to do the multilinear regression \")\n\n if plot == True:\n df = pd.concat([Y, X1, X2, newdata, newdataframe[var]],\n keys=['Y', 'X1', 'X2', 'estimated data', 'Estimated replaced'], axis=1,\n join='outer')\n self.plotcomparison(df)\n\n print(\"Their is [\" + str(len(idxmissing)) + \"] FINALLY events missing\")\n # Recalculate the wind direction and speed from the U an V components\n\n try:\n speed, dir = cart2pol(newdataframe['U m/s'], newdataframe['V m/s'])\n newdataframe['Dm G'] = dir\n newdataframe['Sm m/s'] = speed\n except ValueError:\n print\n 'No wind found in the dataframe'\n except KeyError:\n print('No wind found in the dataframe')\n\n self.newdataframes[staname] = newdataframe" ]
[ "0.69976664", "0.58139586", "0.5748849", "0.5552527", "0.5550863", "0.5530793", "0.55279726", "0.5518004", "0.5421447", "0.54047465", "0.5358192", "0.53122264", "0.52683693", "0.5263279", "0.52280056", "0.5153076", "0.51038533", "0.51007193", "0.507282", "0.50530833", "0.50488514", "0.50456476", "0.50298846", "0.50112516", "0.50000465", "0.49984008", "0.49943525", "0.4993162", "0.49832112", "0.49795166", "0.49727067", "0.4969099", "0.49649596", "0.4964497", "0.4952834", "0.49483946", "0.49351195", "0.4934195", "0.4921215", "0.49154627", "0.4912562", "0.49108472", "0.49077004", "0.49049252", "0.48978424", "0.48970366", "0.48966184", "0.48919708", "0.4889385", "0.48891345", "0.48885944", "0.48873556", "0.48856896", "0.48652816", "0.485487", "0.48464245", "0.4844927", "0.48428643", "0.48416328", "0.48298717", "0.4827711", "0.48242176", "0.48128083", "0.47982547", "0.47971055", "0.47878516", "0.47831237", "0.47785854", "0.4775928", "0.47717708", "0.47715655", "0.47695407", "0.47690982", "0.47598824", "0.47513562", "0.47510457", "0.47391307", "0.47313213", "0.47283602", "0.4726484", "0.47223568", "0.472172", "0.4719994", "0.47194037", "0.4719325", "0.47151005", "0.470958", "0.47080472", "0.47074664", "0.47060654", "0.47041047", "0.4702029", "0.46960562", "0.4694235", "0.46813357", "0.46789417", "0.4678458", "0.46775183", "0.46750462", "0.46748582" ]
0.7368926
0
Populate watercourse and aquifer related data tables.
def populate_water_tables(connection): metadata = load_metadata('water') cursor = connection.cursor() # Check if tables are already populated. cursor.execute('SELECT count(*) FROM watercourses') watercourse_count = cursor.fetchone()[0] cursor.execute('SELECT count(*) FROM aquifers') aquifer_count = cursor.fetchone()[0] if watercourse_count and aquifer_count: print('Water tables already populated!') return station_data = get_station_data() for archive in metadata.keys(): print(f'{archive}-water:'.upper()) water_body = get_water_definitions(archive)['body'] # 1. Populate watercourses/aquifers: stations = {} for water_body_name in metadata[archive].keys(): print(f'\tPopulating {water_body}: "{water_body_name}"') cursor.execute(f'''INSERT INTO {water_body}s(location_id, name) VALUES (0, '{water_body_name}')''') water_body_id = cursor.lastrowid # 2. Populate watercourse_stations/aquifer_stations: for station_id in metadata[archive][water_body_name]['stations']: station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name']) if station_id in stations: # Prefer watercourses/aquifer with more stations current_len = len(metadata[archive][water_body_name]['stations']) previous_len = len(metadata[archive][stations[station_id]]['stations']) if current_len < previous_len: print(f'\t\tStation already exists: {station_id} - "{station_name}" ("{water_body_name}")') continue else: cursor.execute(f'''DELETE FROM {water_body}_stations WHERE id = {station_id}''') print(f'\t\tRemoved station: {station_id} - "{station_name}" from "{stations[station_id]}")') stations[station_id] = water_body_name print(f'\t\tPopulating station: {station_id} - "{station_name}"') # Insert station location if station data exists. location_id = 0 station_row = station_data.query(f'ŠIFRA == "{station_id}"') if not station_row.empty: index = station_row.index[0] lat = station_row.at[index, 'LAT'] lng = station_row.at[index, 'LON'] if not np.isnan(lat) and not np.isnan(lng): name = f"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})" cursor.execute(f'''INSERT INTO locations(name, lat, lng) VALUES ('{name}', {lat}, {lng})''') location_id = cursor.lastrowid # Insert station. cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name) VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''') # 3. Populate watercourse_measurements/aquifer_measurements: if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'], station_id): cursor.execute(f'''DELETE FROM {water_body}_stations WHERE id = {station_id}''') print(f'\t\tRemoved station with useless data: {station_id} - "{station_name}"') # Remove empty watercourses/aquifers. cursor.execute(f'''SELECT w.id, w.name FROM {water_body}s w WHERE NOT EXISTS ( SELECT s.id FROM {water_body}_stations s WHERE w.id = s.{water_body}_id )''') for row in cursor.fetchall(): cursor.execute(f'''DELETE FROM {water_body}s WHERE id = {row[0]}''') print(f'\tRemoved empty {water_body}: "{row[1]}"')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "def populate_tables(connection: sqlite3.Connection) -> None:\n fake = Faker()\n Faker.seed(0)\n\n c = conn.cursor()\n\n number_of_courses = fake.pyint(min_value=5, max_value=20)\n\n for _ in range(number_of_courses):\n course_name = fake.word()\n\n insert_statement = f'insert into courses (name) values (\"{course_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n number_of_users = fake.pyint(min_value=1, max_value=23)\n\n Faker.seed()\n\n for _ in range(number_of_users):\n\n if fake.pybool():\n user_name = f'{fake.first_name_female()} {fake.last_name_female()}'\n else:\n user_name = f'{fake.first_name()} {fake.last_name()}'\n\n insert_statement = f'insert into users (name) values (\"{user_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n for _ in range(50000):\n Faker.seed()\n\n random_user_id = fake.pyint(1, number_of_users)\n random_course_id = fake.pyint(1, number_of_courses)\n Faker.seed()\n random_lesson_no = fake.pyint(3, 12)\n Faker.seed()\n random_exercise_no = fake.pyint(1, 50)\n random_data = fake.sentence()\n\n insert_statement = f\"\"\"insert into saves (user_id, course_id, lesson_no, exercise_no,data) \n values ({random_user_id}, {random_course_id}, {random_lesson_no}, \n {random_exercise_no}, '{random_data}');\"\"\"\n c.execute(insert_statement)\n\n connection.commit()", "def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()", "def insert_relations_staging(self):\n\n for year in range(START_YEAR_CREATIVE_WORKS, CURRENT_YEAR, YEARS_RANGE):\n self.load_wikidata(\"movie_roles\", MOVIE_ROLES_BY_YEAR_SPARQL_QUERY, INSERT_MOVIE_ROLE_SQL_QUERY, INSERT_MOVIE_ROLE_MAP_COLUMNS, year, YEARS_RANGE)\n\n self.load_wikidata(\"song_roles\", SONG_ROLES_BY_YEAR_SPARQL_QUERY, INSERT_SONG_ROLE_SQL_QUERY, INSERT_SONG_ROLE_MAP_COLUMNS, year, YEARS_RANGE, True)\n self.load_wikidata(\"tvshow_roles\", TVSHOW_ROLES_SPARQL_QUERY, INSERT_TVSHOW_ROLE_SQL_QUERY,\n INSERT_TVSHOW_ROLE_MAP_COLUMNS)\n self.load_wikidata(\"animatedmovie_roles\", ANIMATEDMOVIE_ROLES_SPARQL_QUERY, INSERT_ANIMATEDMOVIE_ROLE_SQL_QUERY,\n INSERT_ANIMATEDMOVIE_ROLE_MAP_COLUMNS)\n self.load_wikidata(\"videogame_roles\", VIDEOGAME_ROLES_SPARQL_QUERY, INSERT_VIDEOGAME_ROLE_SQL_QUERY, INSERT_VIDEOGAME_ROLE_MAP_COLUMNS)\n self.load_wikidata(\"book_roles\", BOOK_ROLES_SPARQL_QUERY, INSERT_BOOK_ROLE_SQL_QUERY, INSERT_BOOk_ROLE_SQL_QUERY)", "def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()", "def load_dwh_tables(self):\n print(\"Loading the creative works table\")\n self.cur.execute(dwh_queries.INSERT_CREATIVE_WORKS_SQL_QUERY)\n self.conn.commit()\n\n print(\"Loading the participations table\")\n\n self.cur.execute(dwh_queries.INSERT_PARTICIPATIONS_SQL_QUERY)\n self.conn.commit()", "def setUp(self):\n resume.objects.create(\n first_name='Nicholas',\n last_name='Bielinski',\n )\n experience.objects.create(\n title='Helpdesk Technician',\n location='L3 Technologies',\n start_date='6/26/2017',\n end_date='present',\n description='blah blah blah'\n )\n education.objects.create(\n 
institution_name='UNH Manchester',\n location='Manchester',\n degree='Bachelor',\n major='CIS',\n gpa = '3.5'\n )", "def synthesize_employment_data(self, config):\r\n jobs_by_zone_by_sector_table_name = config['jobs_by_zone_by_sector']\r\n gridcells_table_name = config['gridcells']\r\n jobs_table_name = config['jobs']\r\n gridcells_output_table_name = config['gridcells_output']\r\n jobs_output_table_name = config['jobs_output']\r\n \r\n input_db_name = config['db_config'].database_name\r\n output_db_name = config['output_database_name']\r\n \r\n sectors = config['sector_names_and_ids']\r\n building_types_and_ids_and_home_based = config[\r\n 'building_type_column_names_and_ids_and_home_based']\r\n \r\n building_types = []\r\n building_ids = []\r\n home_based = [] \r\n for type, id, home in building_types_and_ids_and_home_based:\r\n building_types += [type]\r\n building_ids += [id]\r\n home_based += [home]\r\n \r\n \r\n from_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = input_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = config['db_config'].password \r\n )\r\n to_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = output_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = config['db_config'].password \r\n )\r\n\r\n FlattenScenarioDatabaseChain().copy_scenario_database(\r\n from_database_configuration = from_database_configuration, \r\n to_database_configuration = to_database_configuration,\r\n tables_to_copy = [gridcells_table_name, jobs_table_name])\r\n \r\n db_server = DatabaseServer(to_database_configuration) \r\n output_database = db_server.get_database(output_db_name)\r\n \r\n sector_name = 0; sector_id = 1\r\n \r\n sector = {}\r\n for entry in sectors:\r\n name = entry[sector_name]\r\n id = entry[sector_id]\r\n sector[id] = self._get_jobs_per_building_type_in_sector_by_zone(\r\n output_database, jobs_by_zone_by_sector_table_name, \r\n jobs_table_name, name, id)\r\n\r\n results = self._get_building_type_proportion_by_zone(output_database, \r\n gridcells_table_name)\r\n \r\n grid_id = 0; zone_id = 1\r\n dist = {}\r\n \r\n type_index = {}\r\n \r\n for name in building_types:\r\n for i in range(len(results[0])):\r\n column_name = results[0][i]\r\n if name == column_name:\r\n type_index[name] = i\r\n break;\r\n else:\r\n raise KeyError, ('No column by the name of \\'%s\\' found in '\r\n 'the database.' 
% name) \r\n\r\n for name in building_types:\r\n dist[name] = {}\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] = []\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] += [(row[grid_id], \r\n row[type_index[name]])]\r\n \r\n jobs_table_data = self._create_jobs_table_data(dist, sector,\r\n building_types_and_ids_and_home_based)\r\n \r\n output_database.execute('USE %(out_db)s' % {'out_db':output_db_name})\r\n \r\n output_database.execute(\"\"\"\r\n CREATE TABLE %(jobs_out)s (\r\n JOB_ID INT AUTO_INCREMENT, PRIMARY KEY(JOB_ID),\r\n GRID_ID INT, HOME_BASED INT, SECTOR_ID INT, BUILDING_TYPE INT);\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n \r\n if len(jobs_table_data) > 0:\r\n output_prefix = (\r\n \"\"\"INSERT INTO %(jobs_out)s \r\n (GRID_ID, HOME_BASED, SECTOR_ID, BUILDING_TYPE) VALUES\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n output_postfix = ';'\r\n \r\n step = 1000\r\n length = len(jobs_table_data)\r\n iterations = int(length/step) + 1\r\n \r\n for i in range(iterations):\r\n low = i*step\r\n high = (i+1)*step\r\n \r\n if high > length: high = length\r\n \r\n output_body = \"\"\r\n \r\n for j in range(low, high):\r\n output_body += (\r\n '(%(grid)s, %(home)s, %(sector)s, %(building)s),\\n' \r\n % jobs_table_data[j])\r\n \r\n output_query = \"%s%s%s\" % (output_prefix, \r\n output_body[:-2], \r\n output_postfix)\r\n\r\n output_database.execute(output_query)\r\n \r\n \r\n ### TODO: \r", "def populate_database(telescope_name, instrument_name):\n telescope = Telescope.objects.create(\n name=telescope_name, latitude=25.0, longitude=45.0)\n instrument = Instrument.objects.create(\n name=instrument_name, telescope=telescope)\n for year_int in (2012, 2013):\n for month_int in range(1, 13):\n for night_int in (1, monthrange(year_int, month_int)[1]):\n ut_date = date(year_int, month_int, night_int)\n night = Night.objects.create(\n ut_date=ut_date, instrument=instrument, observers='Smith')\n Exposure.objects.create(\n night=night, run_number=1, ut_start=time(10, 0, 0),\n exposed=20.0, ra=60.0, dec=30.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=2, ut_start=time(11, 0, 0),\n exposed=30.0, ra=90.0, dec=0.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=3, ut_start=time(12, 0, 0),\n exposed=40.0, ra=120.0, dec=-30.0, object_exp=False)", "def populate_database(num_patients, min_checkins, max_checkins):\n departments = [\n Department(department_name=\"Cardiology\"),\n Department(department_name=\"Emergency\"),\n Department(department_name=\"Gynecology\"),\n Department(department_name=\"Pediatrics\"),\n Department(department_name=\"Obstetrics\"),\n Department(department_name=\"Oncology\"),\n Department(department_name=\"Orthopedics\"),\n Department(department_name=\"Neurology\")\n ]\n\n for i in xrange(num_patients):\n patient = Patient(**generate_patient())\n patient.departments.append(choice(departments))\n db.add(patient)\n\n for j in xrange(randrange(min_checkins, max_checkins)):\n checkin = CheckIn(**generate_checkin())\n checkin.patient_nhi = patient.nhi\n\n lci = patient.latest_checkin_time\n vid = checkin.checkin_time\n\n lci = vid if lci is None or vid > lci else lci\n patient.latest_checkin_time = lci\n\n db.add(checkin)\n\n for k in xrange(randrange(0, 3)):\n appointment = Appointment(**generate_appointment())\n appointment.patient_nhi = patient.nhi\n\n db.add(appointment)\n\n db.commit()", "def populate_from_samples():\n\n # Tags\n 
try:\n for row in get_csv_data('samples/tags.csv'):\n tag = Tag(name=row['Name'], desc=row['Description'])\n db_session.add(tag)\n finally:\n db_session.commit()\n\n # Organizations\n try:\n for row in get_csv_data('samples/organizations.csv'):\n org = Organization(desc=row['Name'])\n db_session.add(org)\n finally:\n db_session.commit()\n\n # Departments\n try: \n for row in get_csv_data('samples/departments.csv'):\n org = db_session.query(Organization).filter_by(desc=row['Organization']).one()\n dpt = Department(desc=row['Department'], org=org)\n\n db_session.add(dpt)\n finally:\n db_session.commit()\n\n # Application types\n try:\n for row in get_csv_data('samples/apptypes.csv'):\n apptype = AppType(desc=row['Name'])\n db_session.add(apptype)\n finally:\n db_session.commit()\n\n # Applications\n try:\n for row in get_csv_data('samples/applications.csv'):\n apptype = db_session.query(AppType).filter_by(desc=row['AppType']).one()\n dpt = db_session.query(Department).join(Organization).\\\n filter(Department.desc==row['Department']).\\\n filter(Organization.desc==row['Organization']).\\\n one()\n\n app = App(desc=row['Application'], \n app_type=apptype, \n department=dpt,\n version=row['Version'],\n environment=row['Environment'],\n platform=row['Platform']\n )\n\n db_session.add(app)\n finally:\n db_session.commit()\n\n # Connections and Headers\n try:\n for row in get_csv_data('samples/connections.csv'):\n conn = Connection(conn_type=row['Type'], url=row['URL'], port=row['Port'], answer=row['Answer'])\n header = Header(conn_id=conn.id, header=row['Header'], value=row['Value'], conn=conn)\n\n db_session.add(conn)\n db_session.add(header)\n finally:\n db_session.commit()", "def _setup_all_awardees():\n hpo_data = _prep_awardee_csv_data('tests/test-data/fixtures/awardees.csv')\n org_data = _prep_awardee_csv_data('tests/test-data/fixtures/organizations.csv')\n site_data = _prep_awardee_csv_data('tests/test-data/fixtures/sites.csv')\n dao = HPODao()\n #\n # Import HPO records\n #\n for column in range(0, len(hpo_data[0]) - 1):\n data = _convert_csv_column_to_dict(hpo_data, column)\n dao.insert(HPO(hpoId=column+1, displayName=data['Name'], name=data['Awardee ID'],\n organizationType=OrganizationType(data['Type']), isObsolete=ObsoleteStatus.ACTIVE))\n #\n # Import Organization records\n #\n with dao.session() as session:\n for column in range(0, len(org_data[0]) - 1):\n data = _convert_csv_column_to_dict(org_data, column)\n result = session.query(HPO.hpoId).filter(HPO.name == data['Awardee ID']).first()\n dao.insert(Organization(externalId=data['Organization ID'], displayName=data['Name'], hpoId=result.hpoId))\n #\n # Import Site records\n #\n with dao.session() as session:\n for column in range(0, len(site_data[0]) - 1):\n data = _convert_csv_column_to_dict(site_data, column)\n result = session.query(Organization.hpoId, Organization.organizationId).\\\n filter(Organization.externalId == data['Organization ID']).first()\n try:\n mayo_link_id = data['MayoLINK Client #']\n except KeyError:\n mayo_link_id = str(random.randint(7040000, 7999999))\n dao.insert(Site(siteName=data['Site'], googleGroup=data['Site ID / Google Group'].lower(),\n mayolinkClientNumber=mayo_link_id, hpoId=result.hpoId,\n organizationId=result.organizationId))", "def setup_vars(self):\n # Add Full time positions\n self.manager_id = self._add_person(\"Manager\", \"ARRAY['Database', 'OS', 'AI']\", 30)\n self.admin_id = self._add_person(\"Admin\", salary=40)\n self.full_instructor_id = self._add_person(\n \"Instructor\", 
\"ARRAY['Database']\", 20\n )\n\n # Add Part time instructor\n self.part_instructor_id = self._add_part_time_instr(\"ARRAY['OS']\", 10)\n self.part_instructor_id = self._add_part_time_instr(\"ARRAY['AI']\", 10)\n\n # Add courses\n self.course_id1 = self._add_course(\"Database\", 1, \"Database\")\n self.course_id2 = self._add_course(\"OS\", 1, \"OS\")\n self.course_id3 = self._add_course(\"AI\", 1, \"AI\")\n\n # Add room\n self.room_id = self._add_room(1, 'Test room', 20)\n self.room_id2 = self._add_room(2, 'Test room 2', 20)\n\n # Add course offerings\n self.course_offering1 = self._add_course_offering('2021-01-21', 10, [('2021-06-21', 9, self.room_id), ('2021-06-21', 11, self.room_id)], '2021-05-31', 20, self.course_id1, self.admin_id)\n self.course_offering2 = self._add_course_offering('2021-01-21', 10, [('2021-06-22', 9, self.room_id), ('2021-06-22', 11, self.room_id)], '2021-05-31', 20, self.course_id2, self.admin_id)\n self.course_offering3 = self._add_course_offering('2021-01-21', 10, [('2021-06-22', 9, self.room_id2), ('2021-06-22', 11, self.room_id2)], '2021-05-31', 20, self.course_id3, self.admin_id)\n\n # Add customers\n self.customer_id1 = self._add_customer('Test1', \"test\", 987654321, '[email protected]', '1234123412341234', '123', '2025-05-31')\n self.customer_id2 = self._add_customer('Test2', \"test\", 987654321, '[email protected]', '1234123412341235', '123', '2025-05-31')\n self.customer_id3 = self._add_customer('Test3', \"test\", 987654321, '[email protected]', '1234123412341236', '123', '2025-05-31')\n\n # Register sessions\n self._register_credit_card('2021-01-21', self.course_id1, 1, self.customer_id1)\n self._register_credit_card('2021-01-21', self.course_id1, 1, self.customer_id2)\n self._register_credit_card('2021-01-21', self.course_id1, 1, self.customer_id3)\n\n # Add course packages\n self.package1 = self._add_course_package(\"Best Package\", 2, '2021-03-01', '2021-08-02', 50)\n self.package2 = self._add_course_package(\"Medium Package\", 2, '2021-03-01', '2021-08-02', 100)\n self.package3 = self._add_course_package(\"Worst Package\", 2, '2021-03-01', '2021-08-02', 150)\n\n # Buy course packages\n self._buy_package(self.customer_id1, self.package1)\n self._buy_package(self.customer_id2, self.package2)\n self._buy_package(self.customer_id3, self.package3)\n\n # Redeem sessions\n self._register_redeems('2021-01-21', self.course_id2, 1, self.customer_id1)\n self._register_redeems('2021-01-21', self.course_id2, 1, self.customer_id2)\n self._register_redeems('2021-01-21', self.course_id2, 1, self.customer_id3)\n\n # Cancel registrations\n self._cancel_registration(self.customer_id1, self.course_id1)\n self._cancel_registration(self.customer_id2, self.course_id2)", "def insert_entities_staging(self):\n\n for year in range(1900, CURRENT_YEAR, YEARS_RANGE):\n self.load_wikidata(\"movies\", MOVIES_BY_YEAR_SPARQL_QUERY, INSERT_MOVIE_SQL_QUERY, INSERT_MOVIE_MAP_COLUMNS, year, YEARS_RANGE)\n\n self.load_wikidata(\"songs\", SONGS_BY_YEAR_SPARQL_QUERY, INSERT_SONG_SQL_QUERY, INSERT_SONG_MAP_COLUMNS, year, YEARS_RANGE, True)\n self.load_wikidata(\"tvshows\", TVSHOWS_SPARQL_QUERY, INSERT_TVSHOW_SQL_QUERY, INSERT_TVSHOW_MAP_COLUMNS)\n self.load_wikidata(\"animatedmovies\", ANIMATEDMOVIES_SPARQL_QUERY, INSERT_ANIMATEDMOVIE_SQL_QUERY,\n INSERT_ANIMATEDMOVIE_MAP_COLUMNS)\n self.load_wikidata(\"videogames\", VIDEOGAMES_SPARQL_QUERY, INSERT_VIDEOGAME_SQL_QUERY, INSERT_VIDEOGAME_MAP_COLUMNS)\n self.load_wikidata(\"books\", BOOKS_SPARQL_QUERY, INSERT_BOOK_SQL_QUERY, 
INSERT_BOOK_MAP_COLUMNS)", "def run(self):\n\n for table in self.TABLES:\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}_Work\")\n self.dictionary_conn.commit()\n self.logger.info(\"work tables cleared\")\n for id in self.ids:\n drug = self.Drug(self, id)\n if drug.wanted:\n drug.load()\n self.logger.info(\"work tables populated\")\n for table in self.TABLES:\n insert = f\"INSERT INTO {table} SELECT * FROM {table}_Work\"\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}\")\n self.dictionary_cursor.execute(insert)\n self.dictionary_conn.commit()\n self.logger.info(\"live tables ready\")", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True", "def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_count:\n print('Weather tables already populated!')\n return\n\n print('WEATHER:')\n\n # Darksky data\n for dir_name, location in metadata.items():\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"','\".join(location[water_body])}')''')\n break\n\n dir_path = get_data_path('weather', 'raw', dir_name)\n for json_file_name in os.listdir(dir_path):\n json_path = os.path.join(dir_path, json_file_name)\n with open(json_path, 'r', encoding='utf-8') as json_file:\n print(f'\\t\\tPopulating year: {json_file_name[0:-5]}')\n year_forecasts = json.load(json_file)\n for date, date_forecast in year_forecasts.items():\n hourly_forecasts = date_forecast['hourly']\n\n if not hourly_forecasts:\n print(f'\\t\\tNo hourly forecasts for {date}!')\n continue\n\n daily_forecast = {\n 'location_id': location_id,\n 'time': date_forecast['time'],\n 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],\n 'precipitation': 0,\n 'snow_accumulation': 0\n }\n # List of value names with `avg`, `min` and `max` values\n value_names = {\n 'temperature': 'temperature',\n 'cloud_cover': 'cloudCover',\n 'dew_point': 'dewPoint',\n 'humidity': 'humidity',\n 'pressure': 'pressure',\n 'uv_index': 'uvIndex',\n 'precipitation_probability': 'precipProbability',\n 'precipitation_intensity': 
'precipIntensity'\n }\n # Value name counters, which indicate how many times (out of 24)\n # certain value appears in hourly data.\n value_counts = {k: 0 for k in value_names.keys()}\n\n for value_name in value_names.keys():\n daily_forecast[f'{value_name}_avg'] = 0.0\n daily_forecast[f'{value_name}_min'] = float('inf')\n daily_forecast[f'{value_name}_max'] = float('-inf')\n\n # Calculate daily forecast values from hourly forecasts.\n for hourly_forecast in hourly_forecasts:\n for value_name in value_names.keys():\n orig_value_name = value_names[value_name]\n if is_forecast_number(orig_value_name, hourly_forecast):\n daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]\n daily_forecast[f'{value_name}_min'] = min(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_min']\n )\n daily_forecast[f'{value_name}_max'] = max(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_max']\n )\n value_counts[value_name] += 1\n\n if is_forecast_number('precipAccumulation', hourly_forecast) \\\n and hourly_forecast['precipType'] == 'snow':\n daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']\n elif is_forecast_number('precipIntensity', hourly_forecast) \\\n and is_forecast_number('precipProbability', hourly_forecast):\n daily_forecast['precipitation'] += \\\n hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']\n\n for value_name, value_count in value_counts.items():\n if value_count:\n # Calculate average.\n daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count\n else:\n # If value never appeared\n daily_forecast[f'{value_name}_avg'] = 'NULL'\n daily_forecast[f'{value_name}_min'] = 'NULL'\n daily_forecast[f'{value_name}_max'] = 'NULL'\n\n cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})\n VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')\n\n # IOT data:\n for location in SETTINGS['weather_locations_iot']:\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"', '\".join(location[water_body])}')''')\n\n # Set locations for all stations on given water body to match its location.\n cursor.execute(f'''SELECT id\n FROM {water_body}s\n WHERE location_id = {location_id}''')\n ids = [row[0] for row in cursor.fetchall()]\n if len(ids):\n cursor.execute(f'''UPDATE {water_body}_stations\n SET location_id = {location_id}\n WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')\n\n break \n \n file_name = f'''{location['lat']}-{location['lng']}.json'''\n json_path = get_data_path('weather', 'raw', file_name)\n\n # If data file doesn't exist, download it first.\n if not os.path.isfile(json_path):\n with open(json_path, 'wb', encoding=\"utf-8\") as file:\n file.write(read_from_url(location['url'], decode=False))\n \n with open(json_path, 'r', encoding='utf-8') as json_file:\n row_names = {\n \"Sun_duration\": \"sun_duration\",\n \"CloudCover\": \"cloud_cover_avg\",\n \"Percipitation\": \"precipitation\",\n \"New_snow_blanket\": \"snow_accumulation\",\n \"Snow_blanket\": \"snow_depth\",\n 
\"TemperatureAvg\": \"temperature_avg\",\n \"TemperatureMin\": \"temperature_min\",\n \"TemperatureMax\": \"temperature_max\"\n }\n forecasts = json.load(json_file)\n for forecast in forecasts:\n f = {row_names[k]: forecast[k] for k in row_names.keys()}\n f['location_id'] = location_id\n f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)\n cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})\n VALUES ({', '.join([str(v) for v in f.values()])})''')", "def _populate():\n models.Base.metadata.create_all(engine)\n logger.info(\"Initalized database\")\n db = Session()\n\n hermann = models.Account(id=\"test\",\n name=\"Hermann Dörkschneider\",\n email=\"[email protected]\")\n db.add(hermann)\n\n journey = models.Journey(id=str(uuid.uuid4()),\n account_id=\"test\",\n visibility=\"PUBLIC\",\n start_time_utc=datetime.datetime.now(),\n stop_time_utc=datetime.datetime.now())\n db.add(journey)\n\n waypoint1 = models.Waypoint(journey=journey,\n time_utc=datetime.datetime.now(),\n accuracy_m=2.71,\n latitude=3.1416,\n longitude=1.618,\n height_m=10)\n db.add(waypoint1)\n\n waypoint2 = models.Waypoint(journey=journey,\n time_utc=datetime.datetime.now(),\n accuracy_m=5.1,\n latitude=3.1410,\n longitude=1.620,\n height_m=5)\n db.add(waypoint2)\n\n db.commit()\n logger.info(\"Created test account {}\".format(hermann))\n logger.info(\"Created test journey {}\".format(journey))", "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def set_up_tables():\n table_users = \"\"\"\n CREATE TABLE IF NOT EXISTS users (\n id SERIAL PRIMARY KEY,\n username VARCHAR (24) NOT NULL UNIQUE,\n firstname VARCHAR (24) NOT NULL,\n lastname VARCHAR (24) NOT NULL,\n othername VARCHAR (24),\n phone VARCHAR (24) NOT NULL,\n email VARCHAR (30) NOT NULL UNIQUE,\n password VARCHAR (128) NOT NULL,\n passportUrl VARCHAR (200),\n isPolitician BOOLEAN,\n isAdmin BOOLEAN\n )\"\"\"\n\n parties_table = \"\"\" \n CREATE TABLE IF NOT EXISTS parties (\n id SERIAL PRIMARY KEY,\n name VARCHAR (35) NOT NULL UNIQUE,\n hqAddress VARCHAR (30),\n logoUrl VARCHAR\n )\"\"\"\n\n offices_table = \"\"\"\n CREATE TABLE IF NOT EXISTS offices (\n id SERIAL PRIMARY KEY,\n name VARCHAR (35) NOT NULL UNIQUE,\n type VARCHAR (35)\n )\"\"\"\n\n canditates_table = \"\"\"\n CREATE TABLE IF NOT EXISTS candidates (\n id SERIAL,\n candidate INTEGER,\n office INTEGER,\n PRIMARY KEY (office, candidate),\n FOREIGN KEY (candidate) REFERENCES users(id) ON DELETE CASCADE,\n FOREIGN KEY (office) REFERENCES offices(id) ON DELETE CASCADE\n )\"\"\"\n\n voters_table = \"\"\"\n CREATE TABLE IF NOT EXISTS votes (\n id SERIAL,\n office INTEGER,\n candidate INTEGER,\n voter INTEGER,\n PRIMARY KEY (office, voter),\n FOREIGN KEY (office) REFERENCES offices(id) ON DELETE CASCADE,\n FOREIGN KEY (candidate) REFERENCES users(id) ON DELETE CASCADE,\n FOREIGN KEY (voter) REFERENCES users(id) ON DELETE CASCADE\n )\"\"\"\n\n return [table_users, parties_table,\n offices_table, canditates_table, voters_table]", "def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = \"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n 
############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n \n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 
'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': {'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': 
table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger ####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id 
BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)", "def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()", "def example_data():\n\n # In case this is run more than once, empty out existing data\n EmployeeProject.query.delete()\n Employee.query.delete()\n Department.query.delete()\n Project.query.delete()\n\n # Add sample employees and departments\n df = Department(dept_code='fin', dept_name='Finance', phone='555-1000')\n dl = Department(dept_code='legal', dept_name='Legal', phone='555-2222')\n dm = Department(dept_code='mktg', dept_name='Marketing', phone='555-9999')\n\n leonard = Employee(name='Leonard', dept=dl)\n liz = Employee(name='Liz', dept=dl)\n maggie = Employee(name='Maggie', state='DC', dept=dm)\n nadine = Employee(name='Nadine')\n\n db.session.add_all([df, dl, dm, leonard, liz, maggie, nadine])\n db.session.commit()\n\n pc = Project(proj_code='car', proj_name='Design Car',\n assignments=[EmployeeProject(emp_id=liz.id, role='Chair'),\n EmployeeProject(emp_id=maggie.id)])\n ps = Project(proj_code='server', proj_name='Deploy Server',\n assignments=[EmployeeProject(emp_id=liz.id),\n EmployeeProject(emp_id=leonard.id, role='Auditor')])\n\n db.session.add_all([ps, pc])\n db.session.commit()", "def prepare_database(self, waterscenario=None, trafficscenario=None):\n\n # Validate input\n if waterscenario:\n waterscenario = Path(waterscenario)\n assert waterscenario.exists(), 'Waterscenario file not found'\n\n BIVAS = pyBIVAS(self.BIVAS_database)\n df_trafficscenarios = BIVAS.trafficscenario_numberoftrips()\n\n\n # Do changes to database:\n con = sqlite3.connect(self.BIVAS_database)\n c = con.cursor()\n\n # Update waterscenario with given file\n if waterscenario:\n # Delete current 
water_scenario_values\n sql = \"DELETE FROM water_scenario_values WHERE 1\"\n c.execute(sql)\n\n sql = \"DELETE FROM water_scenarios WHERE 1\"\n c.execute(sql)\n\n # Write waterdata to database\n\n # Read waterscenario file\n df = pd.read_csv(waterscenario, header=0, index_col=None)\n df = df[['ArcID', 'SeasonID', 'WaterLevel__m', 'RateOfFlow__m3_s', 'WaterSpeed__m_s', 'WaterDepth__m']]\n df['WaterScenarioID'] = 1\n\n # Add new water_scenario\n df.to_sql('water_scenario_values', con,\n if_exists='append', index=False)\n\n # Rename water_scenario\n # waterscenario_name = waterscenario.stem\n # sql = \"\"\"UPDATE water_scenarios SET Description = \"{}\" WHERE ID = {}\"\"\".format(\n # waterscenario_name, waterscenario)\n # c.execute(sql)\n\n\n waterscenario_id = 1\n waterscenario_name = 'TEST waterscenario'\n waterscenario_type = 1\n sql = \"\"\"INSERT into water_scenarios VALUES ({}, '{}', {})\"\"\".format(\n waterscenario_id,\n waterscenario_name,\n waterscenario_type\n )\n c.execute(sql)\n\n # Remove water scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n sql = \"\"\"UPDATE parameters SET WaterScenarioID = 1 WHERE 1\"\"\"\n c.execute(sql)\n\n else:\n # Remove water scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n sql = \"\"\"UPDATE parameters SET WaterScenarioID = NULL WHERE 1\"\"\"\n c.execute(sql)\n\n # Set scenario name and description\n date_string = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.description = f'Date: {date_string}, Waterscenario: {waterscenario}, TrafficScenario: {trafficscenario},'\n\n sql = \"\"\"\n UPDATE scenarios\n SET Name = \"{}\",\n Description = \"{}\"\n WHERE ID = {}\n \"\"\".format(\n self.scenarioName, self.description, self.scenarioID)\n c.execute(sql)\n\n # Update traffic Scenario. 
I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n if trafficscenario:\n if isinstance(trafficscenario, int):\n sql = \"\"\"UPDATE parameters SET TrafficScenarioID = \"{}\" WHERE 1\"\"\".format(trafficscenario)\n c.execute(sql)\n else:\n trafficScenarioID = df_trafficscenarios.index[df_trafficscenarios['Description'] == trafficscenario][0]\n sql = \"\"\"UPDATE parameters SET TrafficScenarioID = \"{}\" WHERE 1\"\"\".format(trafficScenarioID)\n c.execute(sql)\n\n con.commit()\n con.close()\n\n logger.info('BIVAS database copied and updated')", "def update_data(self):\n staff = Staff.objects.all()\n orgs = Organization.objects.all()\n depts = Department.objects.all()\n\n existing = self.all()\n if existing.count():\n existing.delete()\n\n if staff.count():\n for s in staff:\n record = CombinedTeledata(\n id=s.id,\n alpha=s.alpha,\n name=s.name,\n first_name=s.first_name,\n last_name=s.last_name,\n sort_name=s.sort_name,\n email=s.email,\n phone=s.phone,\n postal=s.postal,\n job_position=s.job_position,\n department=s.dept.name,\n dept_id=s.dept.id,\n organization=s.dept.org.name,\n org_id=s.dept.org.id,\n building=s.bldg.name,\n bldg_id=s.bldg.import_id,\n room=s.room,\n from_table='staff'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(s.keywords.all())\n except Exception as e:\n logger.error(str(e))\n\n if orgs.count():\n for o in orgs:\n record = CombinedTeledata(\n id=o.id,\n name=o.name,\n sort_name=o.name,\n phone=o.phone,\n fax=o.fax,\n building=o.bldg.name,\n bldg_id=o.bldg.import_id,\n room=o.room,\n from_table='organizations'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(o.keywords.all())\n except Exception as e:\n logger.error(str(e))\n\n if depts.count():\n for d in depts:\n record = CombinedTeledata(\n id=d.id,\n name=d.name,\n sort_name=d.name,\n phone=d.phone,\n fax=d.fax,\n organization=d.org.name,\n org_id=d.org.id,\n building=d.bldg.name,\n bldg_id=d.bldg.import_id,\n room=d.room,\n from_table='departments'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(d.keywords.all())\n except Exception as e:\n logger.error(str(e))", "def create_final_table(conn, county):\r\n for county in county:\r\n query = f\"SELECT name FROM sqlite_master WHERE type ='table' AND name NOT LIKE 'sqlite_%' AND name = '{county}'\"\r\n result = execute_query(conn, query)\r\n try:\r\n if len(result) == 0:\r\n query = f\"create table {county} as select * from {county}_stg;\"\r\n execute_query(conn, query)\r\n\r\n load_final_table(conn, county)\r\n except Exception as e:\r\n print(f\"This query {query} failed with exception {e}\")", "def example_data():\n\n seed.add_sfpl_branches()\n seed.add_formats()\n\n book_1 = Book(title=\"Alanna: The First Adventure\", author=\"Tamora Pierce\")\n book_2 = Book(title=\"The Hitchhiker's Guide to the Galaxy\", author=\"Douglas Adams\")\n book_3 = Book(title=\"The Hobbit\", author=\"J.R.R. 
Tolkien\")\n db.session.add(book_1)\n db.session.add(book_2)\n db.session.add(book_3)\n\n esqg = User(first_name=\"Elizabeth\", last_name=\"Goodman\", email=\"[email protected]\", password=\"programmer\")\n db.session.add(esqg)\n db.session.commit()\n\n esqg_gr = GoodreadsUser(user_id=esqg.user_id, goodreads_id=ESQG)\n db.session.add(esqg_gr)\n\n my_mission = UserBranch(branch_code=\"miss\", user_id=esqg.user_id)\n db.session.add(my_mission)\n\n my_main = UserBranch(branch_code=\"main\", user_id=esqg.user_id)\n db.session.add(my_main)\n\n db.session.commit()", "def initial_db_setup() -> None:\n db_filename = \"twdft.db\"\n db_path = os.path.join(TWDFT_DATA_DIR, db_filename)\n csv_filename = \"sites.csv\"\n csv_path = os.path.join(TWDFT_DATA_DIR, csv_filename)\n db_is_new = not os.path.exists(db_path)\n sites_csv = os.path.join(TWDFT_DATA_DIR, csv_filename)\n\n if db_is_new:\n with sqlite3.connect(db_path) as conn:\n c = conn.cursor()\n\n # first we create a site object\n c.execute(\n \"\"\"\n CREATE TABLE site(\n id INTEGER PRIMARY KEY,\n name TEXT,\n site_type TEXT,\n sub_category TEXT,\n address_1 TEXT,\n address_2 TEXT,\n town TEXT,\n county TEXT,\n country TEXT,\n postcode TEXT,\n site_category TEXT,\n freq_target TEXT,\n created TEXT,\n notes TEXT,\n last_inspection TEXT,\n next_inspection TEXT,\n pfsp_approval TEXT,\n pfsp_expiry TEXT,\n unlocode TEXT,\n pfso TEXT,\n pso TEXT,\n pfsa_approval TEXT,\n pfsa_expiry TEXT,\n team TEXT,\n created_by TEXT,\n last_updated TEXT,\n updated_by TEXT,\n afp_loc TEXT,\n rdf TEXT,\n classification TEXT,\n article24 TEXT,\n psa_approval TEXT,\n inspection_due TEXT\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspection table\n\n c.execute(\n \"\"\"\n CREATE TABLE inspection(\n id INTEGER PRIMARY KEY,\n site INTEGER,\n date TEXT,\n status TEXT,\n time TEXT,\n FOREIGN KEY(site) REFERENCES site(id)\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspector table\n c.execute(\n \"\"\"\n create table inspector(\n id integer primary key,\n first_name text,\n last_name text\n )\n \"\"\"\n )\n conn.commit()\n\n for i in INSPECTORS:\n first = i.split(\" \")[0]\n last = i.split(\" \")[1]\n c.execute(\n \"INSERT INTO inspector(first_name, last_name) VALUES (?,?)\",\n (first, last),\n )\n\n # a table that links inspectors with inspections\n c.execute(\n \"\"\"\n CREATE TABLE inspector_inspections(\n inspector INTEGER,\n inspection INTEGER,\n FOREIGN KEY (inspector) REFERENCES inspector(id),\n FOREIGN KEY (inspection) REFERENCES inspection(id)\n )\n \"\"\"\n )\n conn.commit()\n\n for site in map(Site._make, csv.reader(open(csv_path, \"r\"))):\n try:\n c.execute(\n f\"\"\"\n INSERT INTO site VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",\n (\n int(site.id.replace(\",\", \"\")),\n site.name,\n site.site_type,\n site.sub_category,\n site.address_1,\n site.address_2,\n site.town,\n site.county,\n site.country,\n site.postcode,\n site.site_category,\n site.freq_target,\n site.created,\n site.notes,\n site.last_inspection,\n site.next_inspection,\n site.pfsp_approval,\n site.pfsp_expiry,\n site.unlocode,\n site.pfso,\n site.pso,\n site.pfsa_approval,\n site.pfsa_expiry,\n site.team,\n site.created_by,\n site.last_updated,\n site.updated_by,\n site.afp_loc,\n site.rdf,\n site.classification,\n site.article24,\n site.psa_approval,\n site.inspection_due,\n ),\n )\n except sqlite3.IntegrityError as e:\n print(\"That hasnae worked\", site.inspection_due)", "def populate_db(self):\n # Get donors\n log.info(\"Populating 
donors.\")\n\n self.r.hmset('Thomas', {'donations': '500', 'email': '[email protected]', 'city': 'Athens', 'state': 'GA', 'zip': 30606})\n\n self.r.hmset('Ted', {'donations': '1', 'email': '[email protected]', 'city': 'Memphis', 'state': 'TN', 'zip': 38104})\n\n self.r.hmset(\"Bailey\", {'donations': '1000', 'email': '[email protected]', 'city': 'Washington', 'state': 'DC', 'zip': 12345})", "def setUp(self):\r\n super(TestAnswerDistributions, self).setUp()\r\n\r\n self.homework = self.add_graded_section_to_course('homework')\r\n self.add_dropdown_to_section(self.homework.location, 'p1', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p2', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p3', 1)\r\n self.refresh_course()", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def setup_data(es_with_collector):\n country_uk = constants.Country.united_kingdom.value.id\n country_us = constants.Country.united_states.value.id\n uk_region = constants.UKRegion.south_east.value.id\n CompanyFactory(\n name='abc defg ltd',\n trading_names=['helm', 'nop'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_uk,\n uk_region_id=uk_region,\n )\n CompanyFactory(\n name='abc defg us ltd',\n trading_names=['helm', 'nop', 'qrs'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_us,\n registered_address_country_id=country_us,\n )\n es_with_collector.flush_and_refresh()", "def fill_target_table(new_data, curs, conn, overwrite=False):\n for i in new_data:\n connect_database.add_target_to_database(list(i), curs, conn, overwrite_exsiting = overwrite)\n conn.commit()", "def seed():\n\n try:\n body_parts = app.config.get(\"BODY_PARTS\")\n body_parts_model = get_class_by_tablename(\"body_parts\")\n\n current_records = body_parts_model.query.all()\n\n if current_records:\n for key, value in body_parts.items():\n if not body_parts_model.find(key):\n\n body_parts_model.create(id=key, name=value)\n\n else:\n\n for key, value in body_parts.items():\n body_parts_model.create(id=key, name=value)\n\n try:\n body_parts_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except:\n body_parts_model.session.rollback()\n print('Body parts records already exist in database.')\n\n try:\n body_subparts = app.config.get(\"BODY_SUBPARTS\")\n body_subparts_model = get_class_by_tablename(\"subparts\")\n\n current_records = body_subparts_model.query.all()\n\n if current_records:\n for key, value in body_subparts.items():\n if not body_subparts_model.find(key):\n body_subparts_model.create(id=key,\n name=value['name'],\n coordinates=value['coordinates'],\n active=value['active'],\n body_part_id=value['body_part_id']\n )\n\n else:\n for key, value in body_subparts.items():\n\n body_subparts_model.create(id=key,\n name=value['name'],\n coordinates=value['coordinates'],\n active=value['active'],\n body_part_id=value['body_part_id']\n )\n try:\n body_subparts_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n body_subparts_model.session.rollback()\n print(e)\n print('Subparts records already exist in database.')\n\n\n try:\n symptoms = app.config.get(\"SYMPTOMS\")\n symptoms_model = get_class_by_tablename(\"symptoms\")\n\n 
current_records = symptoms_model.query.all()\n\n if current_records:\n for key, value in symptoms.items():\n if not symptoms_model.find(key):\n symptoms_model.create(id=key,\n name=value['name'],\n active=value['active']\n )\n\n else:\n for key, value in symptoms.items():\n symptoms_model.create(id=key,\n name=value['name'],\n active=value['active']\n )\n try:\n symptoms_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n symptoms_model.session.rollback()\n print(e)\n print('Symptom records already exist in database.')\n\n\n try:\n suggestions = app.config.get(\"SUGGESTIONS\")\n suggestions_model = get_class_by_tablename(\"suggestions\")\n\n current_records = suggestions_model.query.all()\n\n if current_records:\n for key, value in suggestions.items():\n if not suggestions_model.find(key):\n suggestions_model.create(id=key,\n name=value['name'],\n active=value['active'],\n description=value['description'],\n link=value['link'],\n video_start=value['video_start'],\n video_end=value['video_end']\n )\n\n else:\n for key, value in suggestions.items():\n suggestions_model.create(id=key,\n name=value['name'],\n active=value['active'],\n description=value['description'],\n link=value['link'],\n video_start=value['video_start'],\n video_end=value['video_end']\n )\n try:\n suggestions_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n suggestions_model.session.rollback()\n print(e)\n print('Suggestion records already exist in database.')\n\n try:\n conditions = app.config.get(\"CONDITIONS\")\n conditions_model = get_class_by_tablename(\"conditions\")\n symptoms_model = get_class_by_tablename(\"symptoms\")\n suggestions_model = get_class_by_tablename(\"symptoms\")\n symptom_relation = app.config.get(\"CONDITION_SYMPTOM_RELATION\")\n suggestion_relation = app.config.get(\"CONDITION_SUGGESTION_RELATION\")\n\n current_records = conditions_model.query.all()\n\n if current_records:\n for key, value in conditions.items():\n if not conditions_model.find(key):\n conditions_model.create(id=key,\n name=value['name'],\n active=value['active'],\n description=value['description']\n )\n\n else:\n for key, value in conditions.items():\n conditions_model.create(id=key,\n name=value['name'],\n active=value['active'],\n description=value['description']\n )\n\n for key, value in symptom_relation.items():\n condition = conditions_model.query.filter(conditions_model.id == value['condition_id']).first()\n symptom = symptoms_model.query.filter(symptoms_model.id == value['symptom_id']).first()\n condition.symptoms.append(symptom)\n\n # for key, value in suggestion_relation.items():\n # condition = conditions_model.query.filter(conditions_model.id == value['condition_id']).first()\n # suggestion = suggestions_model.query.filter(suggestions_model.id == value['suggestion_id']).first()\n # condition.suggestions.append(suggestion)\n\n try:\n conditions_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n conditions_model.session.rollback()\n print(e)\n print('Condition records already exist in database.')\n\n try:\n body_subparts_model = get_class_by_tablename(\"subparts\")\n relationship = app.config.get(\"SUBPARTS_CONDITION_RELATION\")\n conditions_model = get_class_by_tablename(\"conditions\")\n\n for key, value in relationship.items():\n subpart = 
body_subparts_model.query.filter(body_subparts_model.id == value['subpart_id']).first()\n condition = conditions_model.query.filter(conditions_model.id == value['condition_id']).first()\n subpart.conditions.append(condition)\n try:\n body_subparts_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n body_subparts_model.session.rollback()\n print(e)\n print('Subparts Relation records already exist in database.')", "async def populate_test_data(self):\n async with (await self._get_connection_pool()).acquire() as conn:\n await conn.execute('delete from foglamp.tasks')\n await conn.execute('delete from foglamp.schedules')\n await conn.execute('delete from foglamp.scheduled_processes')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep1', '[\"python3\", \"../scripts/sleep.py\", \"1\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep10', '[\"python3\", \"../scripts/sleep.py\", \"10\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep30', '[\"python3\", \"../scripts/sleep.py\", \"30\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep5', '[\"python3\", \"../scripts/sleep.py\", \"5\"]')''')", "def initialize(self):\n\n cursor = self.conn.cursor()\n\n # This table can be used as a parent for a collection of runs\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS RunCollections (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE\n );\"\"\"\n )\n\n # This table holds in which run each appears.\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Runs (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE,\n collection_id INT,\n FOREIGN KEY (collection_id) REFERENCES RunCollections (id) ON DELETE CASCADE);\"\"\"\n )\n\n # This table holds resources, which can be in multiple runs and have multiple varieties\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Resources (\n id INT AUTO_INCREMENT PRIMARY KEY, \n extension VARCHAR(20), \n webpage VARCHAR(30),\n run_id INT NOT NULL,\n FOREIGN KEY (run_id) REFERENCES Runs (id) ON DELETE CASCADE);\"\"\"\n )\n\n cursor.execute(\n 'SELECT Table_name FROM information_schema.tables WHERE table_schema = \"vpntfg0\" AND Table_name LIKE \"%Varieties_%\" ORDER BY Table_name'\n )\n for row in cursor.fetchall():\n self.variety_tables.append(row[0])\n\n cursor.close()\n _logger.info(\"Variety tables are: %s\" % self.variety_tables)\n\n _logger.info(\"Database initialized\")", "def create_tables():\n commands = (\n \"\"\"\n CREATE TABLE STUDENT(\n ADMISSION INT PRIMARY KEY NOT NULL,\n NAME TEXT NOT NULL,\n AGE INT NOT NULL,\n COURSE CHAR(50),\n DEPARTMENT CHAR(50)\n )\n \"\"\",\n \"\"\"\n CREATE TABLE vendors (\n vendor_id SERIAL PRIMARY KEY,\n vendor_name VARCHAR(255) NOT NULL\n )\n \"\"\",\n \"\"\" CREATE TABLE parts (\n part_id SERIAL PRIMARY KEY,\n part_name VARCHAR(255) NOT NULL\n )\n \"\"\",\n \"\"\"\n CREATE TABLE part_drawings (\n part_id INTEGER PRIMARY KEY,\n file_extension VARCHAR(5) NOT NULL,\n drawing_data BYTEA NOT NULL,\n FOREIGN KEY (part_id)\n REFERENCES parts (part_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n )\n \"\"\",\n \"\"\"\n CREATE TABLE vendor_parts (\n vendor_id INTEGER NOT NULL,\n part_id INTEGER NOT NULL,\n PRIMARY KEY (vendor_id , part_id),\n FOREIGN KEY (vendor_id)\n REFERENCES vendors (vendor_id)\n ON UPDATE CASCADE ON DELETE CASCADE,\n 
FOREIGN KEY (part_id)\n REFERENCES parts (part_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n )\n \"\"\")\n\n conn = None\n try:\n # read the connection parameters\n params = config()\n # connect to the PostgreSQL server\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n # create table one by one\n for command in commands:\n cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "def load_products():\n\n for i, row in enumerate(open(\"seed_data/category.product\")):\n row = row.rstrip()\n name = row.split(\"|\")\n product_category = ProductCategoryModel(name=name)\n db.session.add(product_category)\n\n for i, row in enumerate(open(\"seed_data/product.product\")):\n row = row.rstrip()\n name, short_description, long_description, product_category_id, img_path_xs, img_path_sm, img_path_md, img_path_lg = row.split(\"|\")\n product = ProductModel(name=name,\n short_description=short_description,\n long_description=long_description,\n product_category_id=product_category_id,\n img_path_xs=img_path_xs,\n img_path_sm=img_path_sm,\n img_path_md=img_path_md,\n img_path_lg=img_path_lg)\n db.session.add(product)\n\n for i, row in enumerate(open(\"seed_data/location.product\")):\n row = row.rstrip()\n name, description, address1, address2, city, state, zip_code, country, latitude, longitude, direction_url = row.split(\"|\")\n location = LocationModel(name=name,\n description=description,\n address1=address1,\n address2=address2,\n city=city,\n state=state,\n zip_code=zip_code,\n country=country,\n latitude=latitude,\n longitude=longitude,\n direction_url=direction_url)\n db.session.add(location)\n\n for i, row in enumerate(open(\"seed_data/location_product.product\")):\n row = row.rstrip()\n location_id, product_id, price, num_available = row.split(\"|\")\n location_product = LocationProductModel(location_id=location_id,\n product_id=product_id,\n price=price,\n num_available=num_available)\n db.session.add(location_product)\n\n db.session.commit()", "def insert_data():\n\tBase.metadata.drop_all(engine)\n\tBase.metadata.create_all(engine)\n\tu1 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tu2 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tc = insert_catalog(u1.id, \"Sichuan Dish\")\n\tinsert_catalog(u1.id, \"Fujian Dish\")\n\tinsert_catalog(u1.id, \"Guangdong Dish\")\n\tinsert_catalog(u2.id, \"Zhejiang Dish\")\n\tinsert_catalog(u2.id, \"Beijing Dish\")\n\tinsert_item(u1.id, \"Iphone 6 plus\", c, 'Is a phone', None)\n\tinsert_item(u1.id, \"Hot pot\", c, \"Hot hot hot\", None)\n\tinsert_item(u2.id, \"Kong Bao Chicken\", c, \"Classic\", None)", "def intialize_db(self):\n self.db.query(\"\"\"\n CREATE TABLE IF NOT EXISTS \"rooms\" (\n \"id\" integer PRIMARY KEY AUTOINCREMENT,\n \"name\" text UNIQUE,\n \"type\" text\n );\n \"\"\")\n self.db.query(\"\"\"\n CREATE TABLE IF NOT EXISTS \"fellows\" (\n \"id\" integer PRIMARY KEY AUTOINCREMENT,\n \"name\" text,\n \"accomodation\" text\n );\n \"\"\")\n self.db.query(\"\"\"\n CREATE TABLE IF NOT EXISTS \"staff\" (\n \"id\" integer PRIMARY KEY AUTOINCREMENT,\n \"name\" text\n );\n \"\"\")\n self.db.query(\"\"\"\n CREATE TABLE IF NOT EXISTS \"fellows_rooms\" (\n \"id\" integer PRIMARY KEY AUTOINCREMENT,\n \"fellow_id\" integer NOT NULL REFERENCES \"fellows\" (\"id\"),\n \"room_id\" integer NOT NULL REFERENCES \"rooms\" 
(\"id\")\n );\n \"\"\")\n self.db.query(\"\"\"\n CREATE TABLE IF NOT EXISTS \"staff_rooms\" (\n \"id\" integer PRIMARY KEY AUTOINCREMENT,\n \"staff_id\" integer NOT NULL REFERENCES \"staff\" (\"id\"),\n \"room_id\" integer NOT NULL REFERENCES \"rooms\" (\"id\")\n );\n \"\"\")", "def main():\n\n #Courses\n years = [2016, 2017, 2018, 2019, 2020]\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE \"course\" (\n \"course_number\"\tint NOT NULL,\n \"dept_id\"\tvarchar(4) NOT NULL,\n \"title\"\tvarchar(100) NOT NULL,\n \"instructor_fname\"\tvarchar(35) DEFAULT NULL,\n \"instructor_lname\"\tvarchar(35) DEFAULT NULL,\n \"student_work_products\"\tjson DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n PRIMARY KEY(\"course_number\", \"term\", \"year\")) \n \"\"\"\n )\n conn.commit()\n courses = [\n (1370, \"CPSC\", \"Computer Literacy\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (1375, \"CPSC\", \"Programming I\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2376, \"CPSC\", \"Intro to Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2380, \"CPSC\", \"Algorithms\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2482, \"CPSC\", \"Computer Organization\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3377, \"CPSC\", \"Advanced Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3380, \"CPSC\", \"Operating Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3383, \"CPSC\", \"Programming Languages\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3384, \"CPSC\", \"Computer Networks\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\"),\n (4360, \"CPSC\", \"Computer Security\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\")\n ]\n #Adding years\n upload_courses = []\n for year in years:\n upload_courses += [x + (year,) for x in courses]\n #Making a few instructors teach multiple course\n new_courses = [\n (4557, \"CPSC\", \"Natural Language Processing\", ),\n (2375, \"CPSC\", \"Programming II\",),\n (2776, \"CPSC\", \"Data Structures and Algorithms\",),\n (4862, \"CPSC\", \"Image Recognition\", ),\n ]\n for i in range(0,len(new_courses)):\n year = choice(years)\n for y in range(0,2): #Number of times new course is taught\n c = upload_courses[i]\n new_data = (c[3], c[4], c[5], choice([\"Fall\", \"Spring\", \"Summer\"]), year+y)\n data = new_courses[i] + new_data\n upload_courses.append(data)\n #Adding solo instructors and solo courses\n upload_courses += [\n (4672, \"CPSC\", \"Programming Memes\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\", choice(years)),\n (1872, \"CPSC\", \"Information 
Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\", choice(years)),\n (1123, \"CPSC\", \"Microsoft Office\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\", choice(years))\n ]\n\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.executemany('''INSERT INTO course (course_number, dept_id, title, instructor_fname, instructor_lname, student_work_products, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?)''', upload_courses)\n conn.commit()\n\n #SWP\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE `student_work_product` (\n `id` INTEGER PRIMARY KEY,\n `product` varchar(250) NOT NULL,\n `course_id` int NOT NULL,\n `dept_id` int NOT NULL,\n `student_fname` varchar(35) NOT NULL,\n `student_lname` varchar(35) NOT NULL,\n `student_outcome` int DEFAULT NULL,\n `score` int DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n CONSTRAINT `course` FOREIGN KEY (`course_id`) REFERENCES `course` (`course_number`)\n CONSTRAINT `course` FOREIGN KEY (`dept_id`) REFERENCES `course` (`dept_id`)\n )\n \"\"\"\n )\n conn.commit()\n \n swps = []\n with sqlite3.connect(\"determined.db\") as conn:\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute (\"Select * from course\")\n records = [dict(x) for x in c.fetchall()]\n #Generating 20 student records for each swp in each course\n for i, course in enumerate(records):\n student_names = []\n for _ in range(20):\n student_names.append({'fname': names.get_first_name(),\n 'lname': names.get_last_name()})\n for product in json.loads(course['student_work_products'])['swp']:\n for student in student_names:\n if i%7 == 0:\n score = int(triangular(50, 85))\n else:\n score = int(triangular(50, 100))\n if score >= 90: outcome = 4\n elif score >= 80: outcome = 3\n elif score >= 70: outcome = 2\n elif score >= 60: outcome = 1\n else: outcome = 0 \n swps.append((\n product,\n course['course_number'],\n \"CPSC\",\n student['fname'],\n student['lname'],\n outcome,\n score, \n course['term'], \n course['year']\n ))\n \n c.executemany('''INSERT INTO student_work_product (product, course_id, dept_id, student_fname, student_lname, student_outcome, score, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)''', swps)\n conn.commit()", "def generate_courses():\r\n for category in CourseCategory.objects.all():\r\n Course.objects.create(name=category.name, category=category, is_active=True,\r\n is_featured=True)", "def __create_tables(self):\n self.__cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS federations(\n id INTEGER PRIMARY KEY UNIQUE,\n name TEXT,\n category TEXT\n )\n \"\"\")\n self.__cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS clubs(\n id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n code_fede INT,\n dpt TEXT,\n nb_clubs INT,\n year INT,\n FOREIGN KEY(code_fede) REFERENCES federations(i)\n )\n \"\"\")\n self.__cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS by_age(\n id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n code_fede INT,\n age TEXT,\n sex TEXT,\n nb INT,\n year INT,\n FOREIGN KEY(code_fede) REFERENCES federations(id)\n )\n \"\"\")\n self.__cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS by_dpt(\n id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n code_fede INT,\n dpt TEXT,\n nb INT,\n sex TEXT,\n year INT,\n FOREIGN KEY(code_fede) REFERENCES federations(id)\n )\n \"\"\")\n\n # Save all the changes\n self.save()", 
"def update_static_data(source: list):\r\n connect = sqlite3.connect(\"REDB_v2.sqlite\")\r\n cursor = connect.cursor()\r\n districts = []\r\n streets = []\r\n series = []\r\n amenities = []\r\n building_types = []\r\n for realestate in source:\r\n if realestate.street not in streets:\r\n streets.append(realestate.street)\r\n if realestate.series not in series:\r\n series.append(realestate.series)\r\n if realestate.building not in building_types:\r\n building_types.append(realestate.building)\r\n if realestate.district not in districts:\r\n districts.append(realestate.district)\r\n if realestate.amenities not in amenities:\r\n amenities.append(realestate.amenities)\r\n for item in streets:\r\n try:\r\n cursor.execute(\"INSERT INTO Streets (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n for item in districts:\r\n try:\r\n cursor.execute(\"INSERT INTO Districts (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n for item in series:\r\n try:\r\n cursor.execute(\"INSERT INTO Series (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n for item in amenities:\r\n try:\r\n cursor.execute(\"INSERT INTO Amenities (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n for item in building_types:\r\n try:\r\n cursor.execute(\"INSERT INTO Buildings (Name) VALUES ('\" + item + \"')\")\r\n except sqlite3.IntegrityError:\r\n pass\r\n connect.commit()\r\n connect.close()", "def init():\n database.create_tables([Tracker])\n database.commit()", "def __init__(self):\n self._db = db\n # Connect to DB\n self._db.connect()\n # Create tables\n self._db.create_tables([Teachers, Parents, Tutors, Students, Homework, Groups, StudentsGroups, Courses])\n # Create filling entries\n self.__create_dummies()\n self._db.close()", "def load_data(data):\n\n # Agencies\n name = data['name']\n slug = Agency.slug_for(name)\n\n a, created = Agency.objects.get_or_create(slug=slug, name=name)\n\n a.abbreviation = data['abbreviation']\n a.description = data.get('description')\n a.keywords = data.get('keywords')\n a.common_requests = data.get('common_requests', [])\n a.no_records_about = data.get('no_records_about', [])\n\n # Only has a single, main branch/office\n if len(data['departments']) == 1:\n dept_rec = data['departments'][0]\n contactable_fields(a, dept_rec)\n\n a.save()\n add_request_time_statistics(data, a)\n\n # Offices\n if len(data['departments']) > 1:\n for dept_rec in data['departments']:\n if dept_rec.get('top_level'):\n # This is actually an agency\n sub_agency_name = dept_rec['name']\n sub_agency_slug = Agency.slug_for(sub_agency_name)\n\n sub_agency, created = Agency.objects.get_or_create(\n slug=sub_agency_slug, name=sub_agency_name)\n sub_agency.parent = a\n\n abbreviation = build_abbreviation(sub_agency_name)\n sub_agency.abbreviation = abbreviation\n sub_agency.description = dept_rec.get('description')\n sub_agency.keywords = dept_rec.get('keywords')\n sub_agency.common_requests = dept_rec.get(\n 'common_requests', [])\n sub_agency.no_records_about = dept_rec.get(\n 'no_records_about', [])\n contactable_fields(sub_agency, dept_rec)\n sub_agency.save()\n add_request_time_statistics(dept_rec, sub_agency)\n else:\n # Just an office\n office_name = dept_rec['name']\n office_slug = Office.slug_for(office_name)\n full_slug = slug + '--' + office_slug\n\n o, created = Office.objects.get_or_create(\n agency=a, slug=full_slug)\n\n o.office_slug = office_slug\n o.name = office_name\n 
contactable_fields(o, dept_rec)\n o.save()\n add_request_time_statistics(dept_rec, a, o)", "def create_tables():\n inf(\"Creating tables\")\n \n pinners = Table('pinners', metadata,\n Column('pinner_id', Integer, primary_key=True),\n Column('name', String(40)),\n Column('email', String(40))\n )\n pinners.create()\n \n contents = Table('contents', metadata,\n Column('content_id', Integer, primary_key=True),\n Column('url', String(80)),\n Column('display_status', String(20)), # good, objectionable, copyright\n Column('pinner_id', Integer, ForeignKey('pinners.pinner_id'))\n )\n contents.create()\n\n reviewers = Table('reviewers', metadata,\n Column('reviewer_id', Integer, primary_key=True),\n Column('name', String(40)),\n Column('email', String(40))\n )\n reviewers.create()\n\n complaints = Table('complaints', metadata,\n Column('complaint_id', Integer, primary_key=True),\n Column('complaint_timestamp', DateTime), # when the complaint was filed\n Column('complaint_type', String(80)), # objectionable, copyright\n Column('process_status', String(20)), # complaint, review, done\n Column('display_status', String(20)), # good, objectionable, copyright\n Column('review_timestamp', DateTime), # when the compliant was resolved\n Column('pinner_id', Integer, ForeignKey('pinners.pinner_id')),\n Column('reviewer_id', Integer, ForeignKey('reviewers.reviewer_id')),\n Column('content_id', Integer, ForeignKey('contents.content_id'))\n )\n complaints.create()\n \n # could create a table of \"near by\" images and/or near by features and \n # include these in the review", "def create_tables():\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n create_train_table(conn)\n create_questions_table(conn)\n create_lectures_table(conn)\n create_example_test_table(conn)\n create_example_test_table(conn)\n\n conn.close()", "def initialize_db(self) -> None:\n if not self.check_schema_initialized():\n self._create_genes_table()\n self._create_meta_data_table()", "def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])", "def _initialize(self):\n for s in Subsidiary.all():\n self.__create_stock(s)\n self.get_stock()", "def _create_intermediate_new_tables(self):\n with futures.ThreadPoolExecutor(max_workers=self._nworkers) as executor:\n # Create all the tables and populate initial tables. Need to commit so that threadpool jobs can\n # see the results\n with self._conn:\n self._run_intermediate_table_job(self._conn, self._create_intermediate_new_tables_structure)\n # Create utility tables\n self._run_intermediate_table_job(self._conn, self._populate_blocking_conditions_table,\n description='blocking conditions')\n self._run_intermediate_table_job(self._conn, self._populate_mcc_mnc_table,\n description='MCC-MNC operator mappings')\n # Create new blacklist table\n self._run_intermediate_table_job(self._conn, self._populate_new_blacklist,\n description='IMEIs to blacklist')\n\n # Create required notifications and pairings tables in parallel before we can kick off the per-MNO\n # pairing and and notifications. 
These jobs have the responsibilities of kicking off those per-MNO jobs\n futures_to_cb = {}\n self._queue_intermediate_table_job(executor,\n futures_to_cb,\n self._populate_new_notifications_lists,\n 'per-MNO notifications for all operators')\n self._queue_intermediate_table_job(executor,\n futures_to_cb,\n self._populate_new_exceptions_lists,\n 'per-MNO exceptions for all operators')\n self._wait_for_futures(futures_to_cb)", "def insert_into_db(self, database):\n\n # insert person\n keys = \"\"\n values = \"\"\n for key, value in self.person.items():\n # location\n if key == \"location\":\n # ensure location is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * FROM p21_cdm.location WHERE city='{value['city']}' \n AND zip='{value['zip']}') THEN INSERT INTO p21_cdm.location (city, zip) \n VALUES ('{value['city']}', '{value['zip']}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.person (location_id, {keys[:-1]}) \n VALUES((SELECT location_id \n FROM p21_cdm.location\n WHERE city='{self.person['location']['city']}' \n and zip='{self.person['location']['zip']}'), \n {values[:-1]})\"\"\")\n\n # insert visits\n for visit in self.visits:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n for key, value in visit.items():\n if key == \"care_site_name\":\n # ensure care site is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * \n FROM p21_cdm.care_site \n WHERE care_site_name='{value}') \n THEN INSERT INTO p21_cdm.care_site (care_site_name) \n VALUES ('{value}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.visit_occurrence (care_site_id, {keys[:-1]}) \n VALUES((SELECT care_site_id\n FROM p21_cdm.care_site\n WHERE care_site_name='{visit['care_site_name']}'),\n {values[:-1]}) \n RETURNING visit_occurrence_id\"\"\")\n\n # insert measurements, observations, conditions & procedures\n for data, tablename in [(self.measurements, \"measurement\"),\n (self.observations, \"observation\"),\n (self.conditions, \"condition_occurrence\"),\n (self.procedures, \"procedure_occurrence\")]:\n for entry in data:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n\n for key, value in entry.items():\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n entry[\"sql_id\"] = database.select(f\"\"\"INSERT INTO p21_cdm.{tablename}({keys[:-1]})\n VALUES({values[:-1]}) RETURNING {tablename}_id\"\"\")[0][0]\n\n # insert fact_relationships in both directions\n for table1, entry1, table2, entry2 in self.fact_relations:\n # 44818890 = Finding associated with (SNOMED)\n database.select(f\"\"\"INSERT INTO p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n VALUES('{table1}','{entry1['sql_id']}','{table2}','{entry2['sql_id']}','44818890')\"\"\")\n # 44818792 = Associated with finding (SNOMED)\n database.select(f\"\"\"INSERT INTO p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n VALUES('{table2}','{entry2['sql_id']}','{table1}','{entry1['sql_id']}','44818792')\"\"\")\n\n # make transactions persistent\n database.commit()", "def populate_db():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('../data/personjob.db') # navigate relative path to the db\n\n 
logger.info('Working with Department class')\n logger.info('Creating department records')\n\n department_number = 0\n department_name = 1\n department_manager = 2\n\n departments = [\n ('A111', 'Asset Management', 'Dave Sanders'),\n ('B222', 'Human Resources', 'Tammy Murray'),\n ('C333', 'Payroll', 'Daddy Warbucks'),\n ]\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for department in departments:\n with database.transaction():\n new_department = Department.create(\n department_number = department[department_number],\n department_name = department[department_name],\n deptartment_manager = department[department_manager]\n )\n new_department.save()\n logger.info('Department has been added to the database')\n\n logger.info('Reading and print all department data...')\n for saved_department in Department:\n logger.info(f'{saved_department.department_name} ' + \\\n f'Manager: {saved_department.department_manager}. ' + \\\n f'Department number: {saved_department.department_number}')\n\n except Exception as e:\n logger.info(f'Error creating = {department[department_number]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def fill_dataset(self):\n rm, rstd = self.get_rolling_stats()\n\n self.add_rolling_mean(rm)\n self.add_bollinger_bands(rstd)\n self.add_spy_info()\n self.add_beta_and_sharpe()\n self.add_stlouis_data()", "def insert_db():\n populate_tables()", "def example_data():\n\n db.create_all()\n\n\n #Empty out data from previous runs\n User.query.delete()\n Book.query.delete()\n Rating.query.delete()\n\n #Add sample users, books, and ratings\n\n #sample users\n user1 = User(user_id=1, email='[email protected]', password='password')\n user2 = User(user_id=2, email='[email protected]', password='password')\n user3 = User(user_id=3, email='[email protected]', password='password')\n user4 = User(user_id=4, email='[email protected]', password='password')\n user5 = User(user_id=5, email='[email protected]', password='password')\n\n #sample books\n book1 = Book(book_id=7627, work_id=16683183, isbn='0007331789', title='Death of Kings (The Saxon Stories, #6)', author='Bernard Cornwell')\n book2 = Book(book_id=7695, work_id=16947613, isbn='0007350430', title='The Time of My Life', author='Cecelia Ahern')\n book3 = Book(book_id=69, work_id=15524542, isbn='0007442912', title='Insurgent (Divergent #2)', author='Veronica Roth')\n book4 = Book(book_id=3327, work_id=23906880, isbn='0007491433', title='The Shock of the Fall', author='Nathan Filer')\n book5 = Book(book_id=8387, work_id=67116, isbn='0099464691', title='The White Lioness (Kurt Wallander, #3)', author='Henning Mankell')\n\n\n #sample ratings\n rating1 = Rating(rating_id=1, book_id=7627, user_id=1, score=5)\n rating2 = Rating(rating_id=2, book_id=7627, user_id=2, score=5)\n rating3 = Rating(rating_id=3, book_id=7627, user_id=3, score=3)\n rating4 = Rating(rating_id=4, book_id=7627, user_id=4, score=3)\n rating5 = Rating(rating_id=5, book_id=7627, user_id=5, score=1)\n rating6 = Rating(rating_id=6, book_id=8387, user_id=1, score=5)\n rating7 = Rating(rating_id=7, book_id=8387, user_id=2, score=5)\n rating8 = Rating(rating_id=8, book_id=8387, user_id=3, score=3)\n rating9 = Rating(rating_id=9, book_id=8387, user_id=4, score=3)\n rating10 = Rating(rating_id=10, book_id=8387, user_id=5, score=1)\n rating11 = Rating(rating_id=11, book_id=69, user_id=5, score=5)\n rating12 = Rating(rating_id=12, book_id=3327, user_id=5, score=5)\n rating13 = Rating(rating_id=13, book_id=3327, 
user_id=2, score=5)\n\n #Add all to session and commit\n db.session.add_all([user1, user2, user3, user4, user5, book1, book2, book3, \n book4, book5, rating1, rating2, rating3, rating4, \n rating5, rating6, rating7, rating8, rating9, rating10, rating11,\n rating12, rating13])\n db.session.commit()", "def build(self):\n \n # create db with empty tables\n dbpath, config = self.setup()\n \n # avoid work if setup decided db exists and build can be skipped\n if dbpath is None:\n return \n \n # check prerequisite files \n obopath = check_file(config.obo, dbpath, \"obo\")\n refpath = check_file(config.reference_phenotypes, dbpath, \n \"reference_phenotypes\")\n freqpath = check_file(config.phenotype_frequencies, dbpath,\n \"phenotype_frequencies\")\n\n self.logger.msg1(\"Loading ontology\") \n obo = MinimalObo(obopath, True)\n \n self.logger.msg1(\"Preparing phenotype frequencies\")\n fill_phenotype_frequency_table(dbpath, freqpath)\n \n # fill database with data\n self.logger.msg1(\"Preparing references\")\n fill_concise_reference_table(dbpath, refpath) \n fill_complete_reference_table(dbpath, obo, config) \n \n self._end()", "def insert_humans_staging(self):\n for year in range(1880, CURRENT_YEAR):\n self.load_wikidata(\"humans\", HUMANS_BY_YEAR_SPARQL_QUERY, INSERT_HUMAN_SQL_QUERY,\n INSERT_HUMAN_MAP_COLUMNS, year=year)", "def createSchoolEnrollmentDataSet(baseyear_tazdata_df, tazdata_df):\n HIGH_SCHOOL_ENROLLMENT_MODEL_SHARE_OF_HS_AGE_KIDS_ENROLLED_IN_HS = 0.966\n\n ageShares = pandas.read_csv(SCHOOLAGE_DISTRIBUTION)\n collegeStudentShares = pandas.read_csv(COLLEGEAGE_DISTRIBUTION)\n\n logging.debug(\"Age shares:\\n{}\".format(ageShares.head()))\n logging.debug(\"College Student shares:\\n{}\".format(collegeStudentShares.head()))\n\n # join to tazdata, baseyear_tazdata_df\n tazdata_df = pandas.merge(left= tazdata_df, right=ageShares, how=\"left\")\n tazdata_df = pandas.merge(left= tazdata_df, right=collegeStudentShares, how=\"left\")\n baseyear_tazdata_df = pandas.merge(left=baseyear_tazdata_df, right=ageShares, how=\"left\")\n baseyear_tazdata_df = pandas.merge(left=baseyear_tazdata_df, right=collegeStudentShares, how=\"left\")\n\n # compute the number of high school students -- assume X percent of kids this age attend high school, as calculated by Chuck\n tazdata_df[\"highSchoolStudents\"] = tazdata_df[\"AGE0519\"]* tazdata_df[\"14to17In05to19\"]*HIGH_SCHOOL_ENROLLMENT_MODEL_SHARE_OF_HS_AGE_KIDS_ENROLLED_IN_HS\n baseyear_tazdata_df[\"highSchoolStudents\"] = baseyear_tazdata_df[\"AGE0519\"]*baseyear_tazdata_df[\"14to17In05to19\"]*HIGH_SCHOOL_ENROLLMENT_MODEL_SHARE_OF_HS_AGE_KIDS_ENROLLED_IN_HS\n\n # compute the number of college students\n tazdata_df[\"collegeStudents\"] = ( tazdata_df[\"AGE0519\"]* tazdata_df[\"18to19In05to19\"]* tazdata_df[\"StudentsAge18to19\"]) + \\\n ( tazdata_df[\"AGE2044\"]* tazdata_df[\"20to24In20to44\"]* tazdata_df[\"StudentsAge20to24\"]) + \\\n ( tazdata_df[\"AGE2044\"]* tazdata_df[\"25to44In20to44\"]* tazdata_df[\"StudentsAge25to44\"])\n baseyear_tazdata_df[\"collegeStudents\"] = (baseyear_tazdata_df[\"AGE0519\"]*baseyear_tazdata_df[\"18to19In05to19\"]*baseyear_tazdata_df[\"StudentsAge18to19\"]) + \\\n (baseyear_tazdata_df[\"AGE2044\"]*baseyear_tazdata_df[\"20to24In20to44\"]*baseyear_tazdata_df[\"StudentsAge20to24\"]) + \\\n (baseyear_tazdata_df[\"AGE2044\"]*baseyear_tazdata_df[\"25to44In20to44\"]*baseyear_tazdata_df[\"StudentsAge25to44\"])\n\n # sum high school and college students by county\n baseyear_students_df = 
baseyear_tazdata_df[[\"COUNTY\",\"highSchoolStudents\",\"collegeStudents\"]].groupby(\"COUNTY\").agg(\"sum\")\n forecast_students_df = tazdata_df[[\"COUNTY\",\"highSchoolStudents\",\"collegeStudents\"]].groupby(\"COUNTY\").agg(\"sum\")\n\n # rename and join baseyear and forecast county tables\n baseyear_students_df.rename(columns={\"highSchoolStudents\":\"highSchoolStudents_baseyear\", \"collegeStudents\":\"collegeStudents_baseyear\"}, inplace=True)\n forecast_students_df.rename(columns={\"highSchoolStudents\":\"highSchoolStudents_forecast\", \"collegeStudents\":\"collegeStudents_forecast\"}, inplace=True)\n students_df = pandas.merge(left=baseyear_students_df, right=forecast_students_df, left_index=True, right_index=True)\n\n students_df[\"highSchoolGrowthRatio\"] = students_df[\"highSchoolStudents_forecast\"]/students_df[\"highSchoolStudents_baseyear\"]\n students_df[\"collegeGrowthRatio\" ] = students_df[\"collegeStudents_forecast\"] /students_df[\"collegeStudents_baseyear\"]\n logging.debug(\"students_df:\\n{}\".format(students_df))\n\n # join growth ratios to baseyear_tazdata_df and apply\n baseyear_tazdata_df = pandas.merge(left=baseyear_tazdata_df, right=students_df[[\"highSchoolGrowthRatio\",\"collegeGrowthRatio\"]], how=\"left\", left_on=\"COUNTY\", right_index=True)\n baseyear_tazdata_df[\"HSENROLL_forecast\"] = baseyear_tazdata_df[\"HSENROLL\"]*baseyear_tazdata_df[\"highSchoolGrowthRatio\"]\n baseyear_tazdata_df[\"COLLFTE_forecast\"] = baseyear_tazdata_df[\"COLLFTE\"] *baseyear_tazdata_df[\"collegeGrowthRatio\"]\n baseyear_tazdata_df[\"COLLPTE_forecast\"] = baseyear_tazdata_df[\"COLLPTE\"] *baseyear_tazdata_df[\"collegeGrowthRatio\"]\n\n # join result to tazdata_df\n tazdata_df = pandas.merge(left=tazdata_df, right=baseyear_tazdata_df[[\"ZONE\",\"HSENROLL_forecast\",\"COLLFTE_forecast\",\"COLLPTE_forecast\"]], how=\"left\", on=\"ZONE\")\n tazdata_df.rename(columns={\"HSENROLL_forecast\":\"HSENROLL\",\n \"COLLFTE_forecast\" :\"COLLFTE\",\n \"COLLPTE_forecast\" :\"COLLPTE\"}, inplace=True)\n\n # remove intermediate variables\n tazdata_df.drop(columns=[\"05to13In05to19\",\"14to17In05to19\",\"18to19In05to19\",\"25to44In20to44\",\"20to24In20to44\",\n \"StudentsAge18to19\",\"StudentsAge20to24\",\"StudentsAge25to44\",\n \"highSchoolStudents\",\"collegeStudents\"], inplace=True)\n return tazdata_df", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)", "def __create_dummies(self):\n zero_teacher = Teachers.get_or_create(teacher_key='0', UID='0')\n zero_parent = Parents.get_or_create(parent_key='0', UID='0')\n zero_tutor = Tutors.get_or_create(tutor_key='0', UID='0')\n zero_student = Students.get_or_create(student_key='0', UID='0', parent=zero_parent[0])\n zero_course = Courses.get_or_create(course_key='0')\n 
zero_group = Groups.get_or_create(group_key='0', teacher=zero_teacher[0], tutor=zero_tutor[0],\n course=zero_course[0])", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def load_disasters():\n\n print \"Disasters\"\n\n #deletes any data within the table before seeding\n Disaster.query.delete()\n\n\n result_count = 1000\n iteration = 0\n records_returned = 1000\n\n # makes payload requests from FEMA API\n while records_returned == 1000:\n payload = {'$top': result_count,\n '$skip': result_count * iteration,\n '$inlinecount': 'allpages',\n '$filter': 'declarationDate ge \\'1990-01-01T04:00:00.000z\\'',\n '$select': 'disasterNumber,declarationDate,state,incidentType,title,incidentBeginDate,incidentEndDate,placeCode,declaredCountyArea'}\n r = requests.get(\n \"http://www.fema.gov/api/open/v1/DisasterDeclarationsSummaries\",\n params=payload)\n\n # iteration counter, starts at zero, for every iteration add 1\n iteration += 1\n\n disaster_info = r.json()\n metadata = disaster_info['metadata']\n record_count = metadata['count']\n records_returned = len(disaster_info['DisasterDeclarationsSummaries'])\n\n # parsing through the information returned from API\n for incident_dict in disaster_info['DisasterDeclarationsSummaries']:\n disasterNumber = incident_dict.get('disasterNumber')\n declarationDate = incident_dict.get('declarationDate')\n state = incident_dict.get('state')\n incidentType = incident_dict.get('incidentType')\n title = incident_dict.get('title')\n incidentBeginDate = incident_dict.get('incidentBeginDate')\n incidentEndDate = incident_dict.get('incidentEndDate')\n placeCode = incident_dict.get('placeCode')\n declaredCountyArea = incident_dict.get('declaredCountyArea')\n\n \"\"\"Try/Except does two things: the try is doing a check to see if the county is already in the counties tables and if it is, then setting the Disaster.countyArea_id in the disasters table. The except is occuring only when the NoResultFound occurs and is creating the county and adding it to the counties table.\"\"\"\n try:\n #variable county set to \"answer\" of query\n county_check = County.query.filter(County.county_name==declaredCountyArea, County.state_code==state).one()\n\n countyArea_id = county_check.county_id\n\n # creating a county when NoResultFound error occurs\n except NoResultFound:\n\n county = County(state_code=state,\n county_name=declaredCountyArea)\n\n db.session.add(county)\n #!!!!!!!!! 
ask bonnie about this again !!!!!!!!!!#\n db.session.flush()\n\n countyArea_id = county.county_id\n\n county = County.query.filter(County.county_name==declaredCountyArea, County.state_code==state).one()\n countyArea_id = county.county_id\n\n disaster = Disaster(disasterNumber=disasterNumber,\n state=state,\n declarationDate=declarationDate,\n incidentType=incidentType,\n title=title,\n incidentBeginDate=incidentBeginDate,\n incidentEndDate=incidentEndDate,\n placeCode=placeCode,\n declaredCountyArea=declaredCountyArea,\n countyArea_id=countyArea_id)\n\n db.session.add(disaster)\n\n db.session.commit()\n print \"Disasters and Counties seeded\"", "def create_tables(cur, country_json, xml_state, body_json):\n print(\"Creating the 3 first tables...\")\n cur.execute('CREATE TABLE IF NOT EXISTS country_purchases(idx INTEGER PRIMARY KEY, state TEXT, amount INTEGER)')\n with open(\"country_purchases.csv\", 'r') as f:\n for idx, line in enumerate(f.read().split('\\n')):\n line_splt = line.split(',')\n try:\n cur.execute('INSERT INTO country_purchases VALUES(\"%s\", \"%s\", \"%s\")' % (idx, line_splt[0], line_splt[1]))\n except IndexError:\n pass\n cur.execute('CREATE TABLE IF NOT EXISTS country_total_purchases(idx INTEGER PRIMARY KEY, state TEXT, amount INTEGER)')\n with open(\"country_total_purchases.csv\", 'r') as f:\n for idx, line in enumerate(f.read().split('\\n')):\n line_splt = line.split(',')\n try:\n cur.execute('INSERT INTO country_total_purchases VALUES(\"%s\", \"%s\", \"%s\")' % (idx, line_splt[0], line_splt[1]))\n except IndexError:\n pass\n cur.execute('CREATE TABLE IF NOT EXISTS country_albums(ID INTEGER PRIMARY KEY, state TEXT, year INTEGER, genre TEXT, album TEXT, amount INTEGER)')\n for idx, album in enumerate(country_json[body_json['state']]):\n cur.execute('INSERT INTO country_albums VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")' %\n (idx, body_json['state'], body_json['year'], body_json['genre'], album,\n xml_state[0][0][idx].text))", "def populate_db(dbase):\n # In this order: Iron, Blood, Shadow, Fel, Storm\n wowhead_ids = []\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-8))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-9))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-10))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-11))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-17))\n item_ids = set(wowhead_ids)\n print(item_ids)\n\n pos = 0\n for item_id in item_ids:\n if pos % 10 == 0:\n print(\"Relic %d of %d\" % (pos, len(item_ids)))\n pos += 1\n import_relic(dbase, item_id)", "def populate_db(dbase):\n\n enchants = [\n # Cloak\n {\n 'spell_id': 5432,\n 'stats': {'agility': 150},\n 'icon': 'inv_enchant_formulagood_01',\n 'item_name': 'Enchant Cloak - Word of Agility',\n 'equip_location': 16,\n 'tooltip_spell': 128546\n },\n {\n 'spell_id': 5435,\n 'stats': {'agility': 200},\n 'icon': 'inv_enchant_formulasuperior_01',\n 'item_name': 'Enchant Cloak - Binding of Agility',\n 'equip_location': 16,\n 'tooltip_spell': 128549\n },\n\n # Ring\n {\n 'spell_id': 5423,\n 'stats': {'crit': 150},\n 'icon': 'inv_enchant_formulagood_01',\n 'item_name': 'Enchant Ring - Word of Critical Strike',\n 'equip_location': 11,\n 'tooltip_spell': 128537\n },\n {\n 'spell_id': 5424,\n 'stats': {'haste': 150},\n 'icon': 'inv_enchant_formulagood_01',\n 'item_name': 'Enchant Ring - Word of Haste',\n 'equip_location': 11,\n 'tooltip_spell': 128538,\n },\n {\n 'spell_id': 5425,\n 'stats': {'mastery': 150},\n 'icon': 'inv_enchant_formulagood_01',\n 
'item_name': 'Enchant Ring - Word of Mastery',\n 'equip_location': 11,\n 'tooltip_spell': 128539,\n },\n {\n 'spell_id': 5426,\n 'stats': {'versatility': 150},\n 'icon': 'inv_enchant_formulagood_01',\n 'item_name': 'Enchant Ring - Word of Versatility',\n 'equip_location': 11,\n 'tooltip_spell': 128540,\n },\n {\n 'spell_id': 5427,\n 'stats': {'crit': 200},\n 'icon': 'inv_enchant_formulasuperior_01',\n 'item_name': 'Enchant Ring - Binding of Critical Strike',\n 'equip_location': 11,\n 'tooltip_spell': 128541,\n },\n {\n 'spell_id': 5428,\n 'stats': {'haste': 200},\n 'icon': 'inv_enchant_formulasuperior_01',\n 'item_name': 'Enchant Ring - Binding of Haste',\n 'equip_location': 11,\n 'tooltip_spell': 128542,\n },\n {\n 'spell_id': 5429,\n 'stats': {'mastery': 200},\n 'icon': 'inv_enchant_formulasuperior_01',\n 'item_name': 'Enchant Ring - Binding of Mastery',\n 'equip_location': 11,\n 'tooltip_spell': 128543,\n },\n {\n 'spell_id': 5430,\n 'stats': {'versatility': 200},\n 'icon': 'inv_enchant_formulasuperior_01',\n 'item_name': 'Enchant Ring - Binding of Versatility',\n 'equip_location': 128544,\n },\n\n # Neck\n {\n 'spell_id': 5437,\n 'stats': {},\n 'icon': 'inv_enchant_formulasuperior_01',\n 'item_name': 'Enchant Neck - Mark of the Claw',\n 'equip_location': 2,\n 'tooltip_spell': 128551,\n 'is_proc': True\n },\n {\n 'spell_id': 5438,\n 'stats': {},\n 'icon': 'inv_enchant_formulasuperior_01',\n 'item_name': 'Enchant Neck - Mark of the Distant Army',\n 'equip_location': 2,\n 'tooltip_spell': 128552,\n 'is_proc': True\n },\n {\n 'spell_id': 5439,\n 'stats': {},\n 'icon': 'inv_enchant_formulasuperior_01',\n 'item_name': 'Enchant Neck - Mark of the Hidden Satyr',\n 'equip_location': 2,\n 'tooltip_spell': 128553,\n 'is_proc': True\n },\n {\n 'spell_id': 5890,\n 'stats': {'mastery': 300},\n 'icon': 'inv_enchant_formulasuperior_01',\n 'item_name': 'Enchant Neck - Mark of the Trained Soldier',\n 'equip_location': 2,\n 'tooltip_spell': 141909,\n 'is_proc': False\n }\n ]\n\n for enchant in enchants:\n dbase.enchants.replace_one({'spell_id': enchant['spell_id']},\n enchant, upsert=True)", "def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()", "def setUp(self):\n # Create domain tables required for the test\n super().setUp()\n\n # Load the test data\n condition_occurrence_data_template = self.jinja_env.from_string(\"\"\"\n CREATE OR REPLACE TABLE `{{project_id}}.{{dataset_id}}.condition_occurrence`\n (\n condition_occurrence_id int64, \n person_id int64, \n condition_concept_id int64, \n stop_reason STRING,\n condition_source_value STRING,\n condition_status_source_value STRING)\n AS (\n WITH w AS (\n SELECT ARRAY<STRUCT<\n condition_occurrence_id int64, \n person_id int64, \n condition_concept_id int64, \n stop_reason STRING,\n condition_source_value STRING,\n condition_status_source_value STRING\n >>\n [(1, 1, 0, 'stop reason', 'source value', 'status'),\n (2, 1, 0, 'stop reason', 'source value', 'status'),\n (3, 1, 0, 'stop reason', 'source value', 'status'),\n (4, 1, 0, 'stop reason', 'source value', 'status')] col\n )\n SELECT \n condition_occurrence_id, \n person_id, \n condition_concept_id, \n stop_reason,\n condition_source_value,\n condition_status_source_value \n FROM w, UNNEST(w.col))\n \"\"\")\n\n # Load the test data\n observation_data_template = self.jinja_env.from_string(\"\"\"\n CREATE OR REPLACE TABLE `{{project_id}}.{{dataset_id}}.observation`\n (\n observation_id int64,\n person_id int64,\n observation_concept_id 
int64,\n observation_source_concept_id int64,\n value_as_string STRING,\n observation_source_value STRING,\n unit_source_value STRING,\n qualifier_source_value STRING,\n value_source_value STRING\n )\n AS (\n -- 1585250 corresponds to the zipcode concept that is not subject to string suppression, value_as_string for this record should be kept --\n WITH w AS (\n SELECT ARRAY<STRUCT<\n observation_id int64, \n person_id int64, \n observation_concept_id int64,\n observation_source_concept_id int64,\n value_as_string STRING, \n observation_source_value STRING,\n unit_source_value STRING,\n qualifier_source_value STRING,\n value_source_value STRING\n >>\n [(1, 1, 0, 1585250, '111111', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (2, 1, 0, 0, 'value_as_string', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (3, 1, 0, 0, 'value_as_string', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (4, 1, 0, 0, 'value_as_string', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (5, 1, 0, 0, 'value_as_string', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (6, 1, 0, 715711, 'foo_date', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value')] col\n )\n SELECT \n observation_id,\n person_id,\n observation_concept_id,\n observation_source_concept_id,\n value_as_string, \n observation_source_value,\n unit_source_value,\n qualifier_source_value,\n value_source_value \n FROM w, UNNEST(w.col))\n \"\"\")\n\n insert_condition_query = condition_occurrence_data_template.render(\n project_id=self.project_id, dataset_id=self.dataset_id)\n\n insert_observation_query = observation_data_template.render(\n project_id=self.project_id, dataset_id=self.dataset_id)\n\n # Load test data\n self.load_test_data([\n f'''{insert_condition_query};\n {insert_observation_query};'''\n ])", "def init_beeswax_db(cls):\n global _INITIALIZED\n if _INITIALIZED:\n return\n\n make_query(cls.client, 'CREATE DATABASE IF NOT EXISTS %(db)s' % {'db': cls.db_name}, wait=True)\n make_query(cls.client, 'CREATE DATABASE IF NOT EXISTS %(db)s_other' % {'db': cls.db_name}, wait=True)\n\n if cls.load_data:\n\n data_file = cls.cluster.fs_prefix + u'/beeswax/sample_data_échantillon_%d.tsv'\n\n # Create a \"test_partitions\" table.\n CREATE_PARTITIONED_TABLE = \"\"\"\n CREATE TABLE `%(db)s`.`test_partitions` (foo INT, bar STRING)\n PARTITIONED BY (baz STRING, boom INT)\n ROW FORMAT DELIMITED\n FIELDS TERMINATED BY '\\t'\n LINES TERMINATED BY '\\n'\n \"\"\" % {'db': cls.db_name}\n make_query(cls.client, CREATE_PARTITIONED_TABLE, wait=True)\n cls._make_data_file(data_file % 1)\n\n LOAD_DATA = \"\"\"\n LOAD DATA INPATH '%(data_file)s'\n OVERWRITE INTO TABLE `%(db)s`.`test_partitions`\n PARTITION (baz='baz_one', boom=12345)\n \"\"\" % {'db': cls.db_name, 'data_file': data_file % 1}\n make_query(cls.client, LOAD_DATA, wait=True, local=False)\n\n # Insert additional partition data into \"test_partitions\" table\n ADD_PARTITION = \"\"\"\n ALTER TABLE `%(db)s`.`test_partitions` ADD PARTITION(baz='baz_foo', boom=67890) LOCATION '%(fs_prefix)s/baz_foo/boom_bar'\n \"\"\" % {'db': cls.db_name, 'fs_prefix': cls.cluster.fs_prefix}\n make_query(cls.client, ADD_PARTITION, wait=True, local=False)\n\n # Create a bunch of other tables\n CREATE_TABLE = \"\"\"\n CREATE TABLE 
`%(db)s`.`%(name)s` (foo INT, bar STRING)\n COMMENT \"%(comment)s\"\n ROW FORMAT DELIMITED\n FIELDS TERMINATED BY '\\t'\n LINES TERMINATED BY '\\n'\n \"\"\"\n\n # Create a \"test\" table.\n table_info = {'db': cls.db_name, 'name': 'test', 'comment': 'Test table'}\n cls._make_data_file(data_file % 2)\n cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 2)\n\n if is_live_cluster():\n LOG.warning('HUE-2884: We cannot create Hive UTF8 tables when live cluster testing at the moment')\n else:\n # Create a \"test_utf8\" table.\n table_info = {'db': cls.db_name, 'name': 'test_utf8', 'comment': cls.get_i18n_table_comment()}\n cls._make_i18n_data_file(data_file % 3, 'utf-8')\n cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 3)\n\n # Create a \"test_latin1\" table.\n table_info = {'db': cls.db_name, 'name': 'test_latin1', 'comment': cls.get_i18n_table_comment()}\n cls._make_i18n_data_file(data_file % 4, 'latin1')\n cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 4)\n\n # Create a \"myview\" view.\n make_query(cls.client, \"CREATE VIEW `%(db)s`.`myview` (foo, bar) as SELECT * FROM `%(db)s`.`test`\" % {'db': cls.db_name}, wait=True)\n\n _INITIALIZED = True", "def set_training_data(self):\n # Optional training data period\n # TODO: add training data period feature to training data query\n if not self.training_period == None:\n training_period_date = (datetime.datetime.utcnow() - timedelta(minutes=self.training_period)).strftime(\"%Y-%m-%d\")\n print(f\"Training data start date: {training_period_date}\")\n # Extract queried data from Athena\n #athena = athena_connect.Athena()\n #features_df = athena.pandas_read_athena(self.training_data_sql)\n with open('feature_sql.txt', 'w') as f:\n print(self.training_data_sql, file=f) \n features_df = pd.read_sql(self.training_data_sql, self.logic_db_engine())\n features_df.fillna(0, inplace=True)\n print(features_df.shape)\n features_df = features_df[max(self.feature_minutes_list):]\n print(features_df.shape)\n # Remove infinity string\n features_df.replace({'Infinity': 0}, inplace=True)\n # Convert all object fields to numeric except date fields\n object_col_list = features_df.columns[features_df.dtypes.eq('object')]\n object_col_list = [col for col in object_col_list if 'trade_date' not in col]\n features_df[object_col_list] = features_df[object_col_list].apply(pd.to_numeric, errors='coerce')\n self.training_df = features_df", "def seed_all():\n seed_client()\n seed_staff()\n seed_request()\n seed_comment()", "def initialize_database(study_file, grasp_file, commit_every=250000,\n progress=False):\n rows = 0\n count = commit_every\n pphenos = {}\n phenos = {}\n platforms = {}\n populations = {}\n\n\n # Create tables\n _, engine = get_session()\n print('Dropping and creating database tables, this may take a while if',\n 'the old database is large.')\n if _config['DEFAULT']['DatabaseType'] == 'sqlite':\n cfile = _os.path.isfile(_config['sqlite']['DatabaseFile'])\n if _os.path.isfile(cfile):\n _os.remove(cfile)\n _Base.metadata.drop_all(engine)\n _Base.metadata.create_all(engine)\n print('Tables created.')\n conn = engine.connect()\n\n # Get tables\n study_table = _Study.__table__\n snp_table = _SNP.__table__\n pheno_table = _Phenotype.__table__\n pcat_table = _PhenoCats.__table__\n plat_table = _Platform.__table__\n pop_table = _Population.__table__\n\n # Create insert statements\n study_ins = study_table.insert()\n snp_ins = snp_table.insert()\n pheno_ins = pheno_table.insert()\n pcat_ins 
= pcat_table.insert()\n plat_ins = plat_table.insert()\n pop_ins = pop_table.insert()\n phsnp_ins = _snp_pheno_assoc.insert()\n phstudy_ins = _study_pheno_assoc.insert()\n plstudy_ins = _study_plat_assoc.insert()\n\n # Unique ID counters\n spare_id = 1\n pheno_id = 1\n pcat_id = 1\n plat_id = 1\n pop_id = 1\n\n # Lists to hold records\n pheno_records = []\n pcat_records = []\n plat_records = []\n pop_records = []\n study_records = []\n snp_records = []\n phsnp_records = []\n phstudy_records = []\n plstudy_records = []\n\n # Platform parsing regex\n plat_parser = _recompile(r'^([^[]*)\\[([^]]+)\\]?(.*)')\n\n # Build study information from study file\n print('Parsing study information.')\n with _open_zipped(study_file) as fin:\n # Drop header\n fin.readline()\n\n if progress:\n pbar = _tqdm(total=2083, unit='studies')\n for line in fin:\n f = line.rstrip().split('\\t')\n\n # Get primary phenotype\n ppheno = _cleanstr(f[7].strip())\n if ppheno not in pphenos:\n pheno_records.append({'phenotype': ppheno,\n 'id': pheno_id})\n pphenos[ppheno] = pheno_id\n pheno_id += 1\n\n # Get phenotype categories\n pheno_cats = f[8].strip().split(';')\n our_phenos = []\n for pcat in pheno_cats:\n pcat = pcat.strip()\n if not pcat:\n continue\n if pcat not in phenos:\n pcat_records.append({\n 'id': pcat_id,\n 'category': pcat,\n 'alias': pheno_synonyms[pcat],\n })\n phenos[pcat] = pcat_id\n pcat_id += 1\n our_phenos.append(phenos[pcat])\n\n # Get platform info\n our_platforms = []\n try:\n plat, snp_count, impt = [\n i.strip() for i in plat_parser.findall(f[18].strip())[0]\n ]\n imputed = True if impt == '(imputed)' else False\n plats = _split_mesy_list(plat)\n for plat in plats:\n plat = plat.strip()\n if plat not in platforms:\n plat_records.append({'id': plat_id,\n 'platform': plat})\n platforms[plat] = plat_id\n plat_id += 1\n our_platforms.append(platforms[plat])\n except IndexError:\n plat, snp_count, impt = None, None, None\n imputed = None\n\n # Get population description\n try:\n pop = f[19].strip()\n try:\n pop = pop_correction[pop]\n except KeyError:\n pass\n if pop not in populations:\n pop_records.append({'id': pop_id,\n 'population': pop})\n populations[pop] = pop_id\n pop_id += 1\n population = populations[pop]\n except IndexError:\n population = None\n\n # Set populaion flags\n pflag = _PopFlag\n disc_pop = pflag(0)\n rep_pop = pflag(0)\n l = len(f)\n if l > 22 and f[22]:\n disc_pop |= pflag.eur\n if l > 23 and f[23]:\n disc_pop |= pflag.afr\n if l > 24 and f[24]:\n disc_pop |= pflag.east_asian\n if l > 25 and f[25]:\n disc_pop |= pflag.south_asian\n if l > 26 and f[26]:\n disc_pop |= pflag.his\n if l > 27 and f[27]:\n disc_pop |= pflag.native\n if l > 28 and f[28]:\n disc_pop |= pflag.micro\n if l > 29 and f[29]:\n disc_pop |= pflag.arab\n if l > 30 and f[30]:\n disc_pop |= pflag.mix\n if l > 31 and f[31]:\n disc_pop |= pflag.uns\n if l > 32 and f[32]:\n disc_pop |= pflag.filipino\n if l > 33 and f[33]:\n disc_pop |= pflag.indonesian\n if l > 35 and f[35]:\n rep_pop |= pflag.eur\n if l > 36 and f[36]:\n rep_pop |= pflag.afr\n if l > 37 and f[37]:\n rep_pop |= pflag.east_asian\n if l > 38 and f[38]:\n rep_pop |= pflag.south_asian\n if l > 39 and f[39]:\n rep_pop |= pflag.his\n if l > 40 and f[40]:\n rep_pop |= pflag.native\n if l > 41 and f[41]:\n rep_pop |= pflag.micro\n if l > 42 and f[42]:\n rep_pop |= pflag.arab\n if l > 43 and f[43]:\n rep_pop |= pflag.mix\n if l > 44 and f[44]:\n rep_pop |= pflag.uns\n if l > 45 and f[45]:\n rep_pop |= pflag.filipino\n if l > 46 and f[46]:\n rep_pop |= 
pflag.indonesian\n\n # Set the global population flag\n pop_flag = disc_pop | rep_pop\n\n # Create study\n study_records.append({\n 'id': int(f[0]),\n 'author': _cleanstr(f[1]),\n 'pmid': _cleanstr(f[2]),\n 'grasp_ver': 1 if '1.0' in f[3] else 2,\n 'noresults': True if f[4] else False,\n 'results': int(f[5]),\n 'qtl': True if f[6] == '1' else False,\n 'phenotype_id': pphenos[ppheno],\n 'phenotype_desc': ppheno,\n 'phenotype': pphenos[ppheno],\n 'phenotype_cats': our_phenos,\n 'datepub': _get_date(f[9]),\n 'in_nhgri': _get_bool(f[10]),\n 'journal': _cleanstr(f[11]),\n 'title': _cleanstr(f[12]),\n 'locations': _cleanstr(f[13]),\n 'mf': _get_bool(f[14]),\n 'mf_only': _get_bool(f[15]),\n 'sample_size': _cleanstr(f[16]),\n 'replication_size': _cleanstr(f[17]),\n 'platforms': platforms,\n 'snp_count': snp_count,\n 'imputed': imputed,\n 'population_id': population,\n 'population': population,\n 'total': int(f[20]),\n 'total_disc': int(f[21]),\n 'pop_flag': int(pop_flag),\n 'disc_pop_flag': int(disc_pop),\n 'european': int(f[22]) if l > 22 and f[22] else None,\n 'african': int(f[23]) if l > 23 and f[23] else None,\n 'east_asian': int(f[24]) if l > 24 and f[24] else None,\n 'south_asian': int(f[25]) if l > 25 and f[25] else None,\n 'hispanic': int(f[26]) if l > 26 and f[26] else None,\n 'native': int(f[27]) if l > 27 and f[27] else None,\n 'micronesian': int(f[28]) if l > 28 and f[28] else None,\n 'arab': int(f[29]) if l > 29 and f[29] else None,\n 'mixed': int(f[30]) if l > 30 and f[30] else None,\n 'unspecified': int(f[31]) if l > 31 and f[31] else None,\n 'filipino': int(f[32]) if l > 32 and f[32] else None,\n 'indonesian': int(f[33]) if l > 33 and f[33] else None,\n 'total_rep': int(f[34]) if l > 34 and f[34] else None,\n 'rep_pop_flag': int(rep_pop),\n 'rep_european': int(f[35]) if l > 35 and f[35] else None,\n 'rep_african': int(f[36]) if l > 36 and f[36] else None,\n 'rep_east_asian': int(f[37]) if l > 37 and f[37] else None,\n 'rep_south_asian': int(f[38]) if l > 38 and f[38] else None,\n 'rep_hispanic': int(f[39]) if l > 39 and f[39] else None,\n 'rep_native': int(f[40]) if l > 40 and f[40] else None,\n 'rep_micronesian': int(f[41]) if l > 41 and f[41] else None,\n 'rep_arab': int(f[42]) if l > 42 and f[42] else None,\n 'rep_mixed': int(f[43]) if l > 43 and f[43] else None,\n 'rep_unspecified': int(f[44]) if l > 44 and f[44] else None,\n 'rep_filipino': int(f[45]) if l > 45 and f[45] else None,\n 'rep_indonesian': int(f[46]) if l > 46 and f[46] else None,\n })\n\n # Create association records\n for i in our_phenos:\n phstudy_records.append({'study_id': int(f[0]),\n 'pheno_id': i})\n for i in our_platforms:\n plstudy_records.append({'study_id': int(f[0]),\n 'platform_id': i})\n\n pbar.update()\n\n pbar.close()\n print('Writing study information...')\n conn.execute(pheno_ins, pheno_records)\n conn.execute(pcat_ins, pcat_records)\n conn.execute(plat_ins, plat_records)\n conn.execute(pop_ins, pop_records)\n conn.execute(study_ins, study_records)\n conn.execute(phstudy_ins, phstudy_records)\n conn.execute(plstudy_ins, plstudy_records)\n print('Done')\n\n # Reinitialize lists for main GRASP parser\n pheno_records = []\n pcat_records = []\n plat_records = []\n pop_records = []\n\n # Get full study info from database for use in SNPs\n sinfo = conn.execute(_select([study_table.c.id, study_table.c.pmid])).fetchall()\n studies = {}\n for i, p in sinfo:\n studies[p] = i\n no_pmid = {\n 'Dissertation (https://openaccess.leidenuniv.nl/handle/1887/17746)': 1,\n 'KARE Genomewide Association Study of Blood 
Pressure Using Imputed SNPs': 2,\n 'Genome-wide Association Study Identification of a New Genetic Locus with Susceptibility to Osteoporotic Fracture in the Korean Population.': 3,\n 'Genome-wide Association Study Identified TIMP2 Genetic Variant with Susceptibility to Osteoarthritis': 4,\n 'Application of Structural Equation Models to Genome-wide Association Analysis ': 5,\n 'Comparison of Erythrocyte Traits Among European, Japanese and Korean': 6,\n 'Genomewide Association Study Identification of a New Genetic Locus with Susceptibility to Osteoporotic Fracture in the Korean Population': 7,\n 'Joint identification of multiple genetic variants of obesity in A Korean Genome-wide association study': 8,\n 'Genome-Wide Association Analyses on Blood Pressure Using Three Different Phenotype Definitions': 9,\n 'Association of intronic sequence variant in the gene encoding spleen tyrosine kinase with susceptibility to vascular dementia': 10,\n }\n\n print('Parsing SNP information...')\n with _open_zipped(grasp_file, encoding='latin1') as fin:\n # Drop header\n fin.readline()\n\n if progress:\n pbar = _tqdm(total=8864717, unit='snps')\n\n for line in fin:\n f = line.rstrip().split('\\t')\n\n # Get primary phenotype\n ppheno = _cleanstr(f[11])\n # These are poorly curated, so there is no need to use a\n # separate table for them.\n # if ppheno not in pphenos:\n # conn.execute(pheno_ins.values(\n # phenotype=ppheno\n # ))\n # pphenos[ppheno] = conn.execute(\n # select([pheno_table.c.id]).where(\n # pheno_table.c.phenotype == ppheno\n # )\n # ).first()[0]\n\n # Get phenotype categories\n pheno_cats = f[13].strip().split(';')\n our_phenos = []\n for pcat in pheno_cats:\n pcat = pcat.strip()\n if not pcat:\n continue\n if pcat not in phenos:\n pcat_records.append({\n 'id': pcat_id,\n 'category': pcat,\n 'alias': pheno_synonyms[pcat],\n })\n phenos[pcat] = pcat_id\n pcat_id += 1\n our_phenos.append(phenos[pcat])\n\n # Get population description\n try:\n pop = f[23].strip()\n try:\n pop = pop_correction[pop]\n except KeyError:\n pass\n if pop not in populations:\n pop_records.append({'id': pop_id,\n 'population': pop})\n populations[pop] = pop_id\n pop_id += 1\n population = populations[pop]\n except IndexError:\n population = None\n\n # Create record for SNP\n try:\n sid = int(f[0])\n except ValueError:\n sid = spare_id\n spare_id += 1\n l = len(f)\n try:\n study = studies[f[7].strip()]\n except KeyError:\n study = no_pmid[f[17].strip()]\n record = {\n 'id': sid,\n 'NHLBIkey': f[0],\n 'HUPfield': f[1],\n 'LastCurationDate': _get_date(f[2]),\n 'CreationDate': _get_date(f[3]),\n 'snpid': f[4],\n 'chrom': f[5],\n 'pos': int(f[6]),\n 'population_id': population,\n 'population': population,\n 'study_id': study,\n 'study': study,\n 'study_snpid': f[8],\n 'paper_loc': f[9],\n 'pval': float(f[10]) if f[10] else None,\n 'phenotype_desc': ppheno,\n 'phenotype_cats': our_phenos,\n }\n record['InGene'] = f[51] if l > 52 else None\n record['NearestGene'] = f[52] if l > 53 else None\n record['InLincRNA'] = f[53] if l > 54 else None\n record['InMiRNA'] = f[54] if l > 55 else None\n record['InMiRNABS'] = f[55] if l > 56 else None\n record['dbSNPfxn'] = f[56] if l > 57 else None\n record['dbSNPMAF'] = f[57] if l > 58 else None\n record['dbSNPinfo'] = f[58] if l > 59 else None\n record['dbSNPvalidation'] = f[59] if l > 60 else None\n record['dbSNPClinStatus'] = f[60] if l > 61 else None\n record['ORegAnno'] = f[61] if l > 62 else None\n record['ConservPredTFBS'] = f[62] if l > 63 else None\n record['HumanEnhancer'] = f[63] 
if l > 64 else None\n record['RNAedit'] = f[64] if l > 65 else None\n record['PolyPhen2'] = f[65] if l > 66 else None\n record['SIFT'] = f[66] if l > 67 else None\n record['LSSNP'] = f[67] if l > 68 else None\n record['UniProt'] = f[68] if l > 69 else None\n record['EqtlMethMetabStudy'] = f[69] if l > 70 else None\n snp_records.append(record)\n\n # Create association records\n for i in our_phenos:\n phsnp_records.append({'snp_id' : sid, 'pheno_id' : i})\n\n # Decide when to execute\n if count:\n count -= 1\n else:\n if progress:\n pbar.write('Writing rows...')\n else:\n print('Writing rows...')\n if pcat_records:\n conn.execute(pcat_ins, pcat_records)\n if plat_records:\n conn.execute(plat_ins, plat_records)\n if pop_records:\n conn.execute(pop_ins, pop_records)\n conn.execute(snp_ins, snp_records)\n conn.execute(phsnp_ins, phsnp_records)\n if progress:\n pbar.write('{} rows written'.format(rows))\n else:\n print('{} rows written'.format(rows))\n count = commit_every-1\n pcat_records = []\n plat_records = []\n pop_records = []\n snp_records = []\n phsnp_records = []\n rows += 1\n if progress:\n pbar.update()\n\n # Final insert\n pbar.close()\n print('Writing final rows...')\n conn.execute(snp_ins, snp_records)\n conn.execute(phsnp_ins, phsnp_records)\n print('{} rows written'.format(rows))\n print('Done!')", "def setUp(self):\n try:\n # Get default data from medical_forum_data_dump.sql, populate tables and connect to DB\n ENGINE.populate_tables()\n self.connection = ENGINE.connect()\n\n # In case of error/exception in populating tables, clear all tables data\n except Exception as exception:\n print(exception)\n ENGINE.clear()", "def populate_t_database():\n with open('minerals.json') as file:\n file = json.loads(file.read())\n\n for mineral in file[:22]:\n mineral_entry = Mineral.objects.get_or_create(**mineral)", "def create_all_tables(self):\n pass", "def full_initialization_process():\n\n db1 = Database('TOBACCO_RAW;')\n con1, cur1 = db1.connect()\n cur1.execute('create index idl_doc_field_id_idx on idl_doc_field(id);')\n cur1.execute('create index idl_doc_id_idx on idl_doc(id);')\n add_timestamp_to_idl_doc()\n\n create_utf_text_files()\n\n initialize_tables()\n fill_tables()", "def ingest():\n db.delete_dataset_records(DATASET_ID)\n\n db.insert_dataset({\n 'dataset_id': DATASET_ID,\n 'title': 'North American Breeding Bird Survey (BBS)',\n 'version': '2016.0',\n 'url': 'https://www.pwrc.usgs.gov/bbs/'})\n\n to_taxon_id = insert_taxa()\n to_place_id = insert_places()\n to_event_id = insert_events(to_place_id)\n insert_counts(to_event_id, to_taxon_id)", "def load_fhwa_records():\n print('--- Importing FHWA DFLTD v.2 records ---')\n for i in tqdm(range(len(tbl_project))):\n prj_id = tbl_project['lng_KeyProject'][i]\n\n expl_in_project = tbl_exploration[\n tbl_exploration.lng_KeyProject == prj_id].index\n for i_exp in expl_in_project:\n expl_id = tbl_exploration['txt_KeyExplorationName'][i_exp]\n\n piles_in_project = tbl_deepfoundation[\n tbl_deepfoundation.lng_KeyProject == prj_id].index\n for i_pile in piles_in_project:\n pile_id = tbl_deepfoundation['lng_KeyDeepFoundation'][i_pile]\n\n tests_for_pile = tbl_loadtest[\n (tbl_loadtest.lng_KeyProject == prj_id) &\n (tbl_loadtest.lng_KeyDeepFoundation == pile_id)\n ].index\n for i_lt in tests_for_pile:\n test_id = tbl_loadtest['lng_KeyLoadTest'][i_lt]\n\n # -- Adding Project Data -------------------------------- #\n if len(piles_in_project) > 1 and len(expl_in_project) < 2:\n wrn = 'Expanded from a project with multiple piles '\\\n 'and/or 
retests'\n prj = add_loc_proj(i, wrn)\n elif len(piles_in_project) < 2 and len(expl_in_project) > 1:\n wrn = 'Expanded from a project with multiple '\\\n 'explorations'\n prj = add_loc_proj(i, wrn)\n elif len(piles_in_project) > 1 and len(expl_in_project) > 1:\n wrn = 'Expanded from a project with multiple '\\\n 'explorations and multiple piles/retests'\n prj = add_loc_proj(i, wrn)\n else:\n prj = add_loc_proj(i)\n db.session.add(prj)\n\n # -- Adding Exploration Data ---------------------------- #\n exploration = add_expl_data(i_exp, expl_id, prj)\n db.session.add(exploration)\n\n # -- Adding Layer Data ---------------------------------- #\n add_layer_data(prj_id, expl_id, exploration)\n\n # -- Adding Pile Data ----------------------------------- #\n pile = add_pile_data(i_pile, prj_id, pile_id, prj)\n db.session.add(pile)\n\n # -- Adding Load Test Data ------------------------------ #\n load_test = add_load_test_data(i_lt, pile)\n db.session.add(load_test)\n\n # -- Adding Static Test Data ---------------------------- #\n add_static_test_data(prj_id, pile_id, test_id, load_test)\n\n # -- Adding Interpreted Data ---------------------------- #\n add_interp_data(prj_id, pile_id, test_id, load_test)\n\n db.session.commit()", "def fill_in(self,category):\r\n api_json = JsonFromApi(category)\r\n extracted_data = ExtractFromJson(api_json.get_json())\r\n self.fill_in_db = DatabaseUpdator(\r\n extracted_data.extract_json(), self.mydb)\r\n self.fill_in_db.table_product_update()\r\n self.fill_in_db.table_category_update()", "def setUp(self):\n self.listMembre = []\n self.listMembre.append(\"bras gauche\")\n self.listMembre.append(\"bras droit\")\n self.exercice = data.Exercice(\"Mouvement des bras\",self.listMembre,\"Pour rééducation\", \"ex/path\")\n self.exercice_a = data.Exercice(\"Mouvement des jambes\",self.listMembre,\"Pour rééducation\", \"ex/path\")\n self.exercice_b = data.Exercice(\"Mouvement des hanches\",self.listMembre,\"Pour rééducation\", \"ex/path\")\n self.exercice_1 = data.Exercice(\"Mouvement des\",self.listMembre,\"Pour rééducation\", \"ex/path\")\n self.exercice_2 = data.Exercice(\"Mouvement hanches\",self.listMembre,\"Pour rééducation\", \"ex/path\")\n\n self.patient = data.Patient(\"Claude\" , \"123\")\n self.patient_b = data.Patient(\"Gaetan\" , \"789\")\n\n self.patient_1 = data.Patient(\"JP\" , \"456\")\n self.patient_2 = data.Patient(\"JN\" , \"654\")\n\n self.listExo = [self.exercice_a,self.exercice_b]\n self.listExo_b = [self.exercice_1,self.exercice_2]\n self.program_a = data.Program(self.listExo,self.patient_b)\n self.program_b = data.Program(self.listExo_b,self.patient_b)\n\n self.bib_a = [self.exercice,self.exercice_a,self.exercice_b]\n self.bib_b = [self.exercice_1,self.exercice_2]\n\n self.listP_a = [self.patient,self.patient_b]\n self.listP_b = [self.patient_1,self.patient_2]\n\n self.allData = data.AppData(self.bib_a,self.listP_a,[self.program_a,self.program_b])", "def populate_jobs():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Job class')\n logger.info('Creating Job records: just like Person. 
We use the foreign key')\n\n JOB_NAME = 0\n START_DATE = 1\n END_DATE = 2\n SALARY = 3\n PERSON_EMPLOYED = 4\n DEPARTMENT = 5\n\n jobs = [\n ('Analyst', '2001-09-22', '2003-01-30',65500, 'Andrew', 'ASYS'),\n ('Senior analyst', '2003-02-01', '2006-10-22', 70000, 'Andrew', 'ASYS'),\n ('Senior business analyst', '2006-10-23', '2016-12-24', 80000, 'Andrew', 'BUSI'),\n ('Admin supervisor', '2012-10-01', '2014-11-10', 45900, 'Peter', 'ADMN'),\n ('Admin manager', '2014-11-14', '2018-01-05', 45900, 'Peter', 'ADMN'),\n ('Sr Project Manager', '2014-11-14', '2018-01-05', 100000, 'Ryan', 'ASYS'),\n ('Manager', '2014-11-14', '2018-01-05', 100000, 'Pamela', 'BUSI'),\n ('Director', '2014-11-14', '2018-01-05', 120000, 'Monica', 'MGMT'),\n ]\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for job in jobs:\n with database.transaction():\n new_job = Job.create(\n job_name = job[JOB_NAME],\n start_date = job[START_DATE],\n end_date = job[END_DATE],\n duration = dates_diff(job[END_DATE], job[START_DATE]),\n salary = job[SALARY],\n person_employed = job[PERSON_EMPLOYED],\n job_department = job[DEPARTMENT])\n new_job.save()\n\n logger.info('Reading and print all Job rows (note the value of person)...')\n for job in Job:\n logger.info(f'{job.job_name} : {job.start_date} to {job.end_date} for {job.person_employed} in {job.job_department}')\n\n except Exception as e:\n logger.info(f'Error creating = {job[JOB_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def set_up_table(db):\n db.bulk_query(\"\"\"\n SET @@SQL_MODE = REPLACE(@@SQL_MODE, 'STRICT_TRANS_TABLES', '');\n \"\"\")\n db.bulk_query(\"\"\"\n CREATE TABLE cats\n (\n id INT unsigned NOT NULL AUTO_INCREMENT, # Unique ID for the record\n name VARCHAR(150) NOT NULL DEFAULT '', # Name of the cat\n owner VARCHAR(150) NOT NULL DEFAULT '', # Owner of the cat\n birth DATE NOT NULL, # Birthday of the cat\n PRIMARY KEY (id) # Make the id the primary key\n );\n \"\"\")\n db.bulk_query(\"\"\"\n INSERT INTO cats ( name, owner, birth) VALUES\n ( 'Sandy', 'Lennon', '2015-01-03' ),\n ( 'Cookie', 'Casey', '2013-11-13' ),\n ( 'Charlie', 'River', '2016-05-21' );\n \"\"\")", "def prepare_data(self):\r\n annual_df = self.annual_df\r\n coef_df = self.coef_df\r\n quarter_df = self.quarter_df\r\n # historical_df = self.historical_df\r\n Event_Buffer = self.Event_Buffer\r\n\r\n Tot_Prod = coef_df[\"Product\"].nunique()\r\n # Tot_Week = coef_df[\"wk\"].nunique()\r\n Tot_Week = 52\r\n\r\n EDLP_Events = list(annual_df[\"RP_Events\"])\r\n Min_EDLP_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in EDLP_Events\r\n ]\r\n Max_EDLP_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else 
Tot_Week\r\n for i in EDLP_Events\r\n ]\r\n\r\n TPR_Events = list(annual_df[\"TPR_Events\"])\r\n Min_TPR_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in TPR_Events\r\n ]\r\n Max_TPR_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else Tot_Week\r\n for i in TPR_Events\r\n ]\r\n\r\n Target_EDLP_Spend = [i for i in annual_df[\"PPG_RP_Spend\"]]\r\n Target_TPR_Spend = [i for i in annual_df[\"PPG_TPR_Spend\"]]\r\n Target_Trade_Spend = [i for i in annual_df[\"PPG_Total_Spend\"]]\r\n\r\n Mapping = {}\r\n Prod_Ind = coef_df[\"Product\"][0:Tot_Prod]\r\n for i, j in zip(Prod_Ind.index, Prod_Ind.values):\r\n Mapping[j] = i\r\n Mapping_reverse = {i: j for j, i in Mapping.items()}\r\n\r\n constants = [i for i in coef_df[\"constant\"]]\r\n\r\n Cat_Coef = coef_df[\"Catalogue\"][0:Tot_Prod]\r\n\r\n Disp_Coef = coef_df[\"Display\"][0:Tot_Prod]\r\n\r\n Base_Price_stg1 = [i for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg1 = []\r\n for pr in range(Tot_Prod):\r\n Intercepts_stg1.append(\r\n np.mean([constants[j * Tot_Prod + pr] for j in range(0, Tot_Week)])\r\n )\r\n\r\n Base_Price_stg2 = [[i] * Tot_Week for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg2 = [\r\n constants[j : j + Tot_Prod] for j in range(0, len(constants), Tot_Prod)\r\n ] # noqa\r\n\r\n EDLP_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Regular\") == 1]]\r\n )\r\n TPR_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Promoted\") == 1]]\r\n )\r\n\r\n # ################################ Available EDLP Interactions pairs ##############################\r\n\r\n EDLP = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if i.count(\"Retailer_Regular\") > 1\r\n ]\r\n EDLP_Interactions = []\r\n for i in EDLP:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n EDLP_Interactions.append(temp)\r\n\r\n # ###################################### Available TPR Interactions pairs #########################\r\n\r\n TPR = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if i.count(\"Retailer_Promoted\") > 1\r\n ]\r\n TPR_Interactions = []\r\n for i in TPR:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n TPR_Interactions.append(temp)\r\n\r\n # ###################################### EDLP_Interaction_Coef_Values ############################\r\n\r\n EDLP_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Regular\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n EDLP_Int_Coef_Values[Pair_name] = list(coef_df[col])\r\n\r\n # ###################################### TPR_Interaction_Coef_Values #############################\r\n\r\n TPR_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Promoted\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n TPR_Int_Coef_Values[Pair_name] = list(coef_df[col])\r\n\r\n # ##################################### Loading Pantry Loading Coefficients #######################\r\n\r\n Pantry_1 = list(coef_df[\"Pantry_Loading_1\"])\r\n Pantry_1 = [\r\n Pantry_1[j : j + Tot_Prod] for j in range(0, len(Pantry_1), Tot_Prod)\r\n ]\r\n Pantry_2 = list(coef_df[\"Pantry_Loading_2\"])\r\n Pantry_2 = [\r\n Pantry_2[j : j + Tot_Prod] for j in range(0, len(Pantry_2), Tot_Prod)\r\n ]\r\n\r\n # TE_Coeff = np.array(Promo_df[[\"TE_Promo\",\"TE_NoPromo\"]])\r\n self.Tot_Prod = Tot_Prod\r\n self.Tot_Week = Tot_Week\r\n self.EDLP_Events = EDLP_Events\r\n 
self.Min_EDLP_Events = Min_EDLP_Events\r\n self.Max_EDLP_Events = Max_EDLP_Events\r\n self.TPR_Events = TPR_Events\r\n self.Min_TPR_Events = Min_TPR_Events\r\n self.Max_TPR_Events = Max_TPR_Events\r\n\r\n self.Target_EDLP_Spend = Target_EDLP_Spend\r\n self.Target_TPR_Spend = Target_TPR_Spend\r\n self.Target_Trade_Spend = Target_Trade_Spend\r\n self.Mapping = Mapping\r\n self.Mapping_reverse = Mapping_reverse\r\n self.constants = constants\r\n self.EDLP_Coef = EDLP_Coef\r\n self.TPR_Coef = TPR_Coef\r\n\r\n self.EDLP_Interactions = EDLP_Interactions\r\n self.TPR_Interactions = TPR_Interactions\r\n self.EDLP_Int_Coef_Values = EDLP_Int_Coef_Values\r\n self.TPR_Int_Coef_Values = TPR_Int_Coef_Values\r\n self.Pantry_1 = Pantry_1\r\n self.Pantry_2 = Pantry_2\r\n\r\n self.Base_Price_stg1 = Base_Price_stg1\r\n self.Intercepts_stg1 = Intercepts_stg1\r\n self.Base_Price_stg2 = Base_Price_stg2\r\n self.Intercepts_stg2 = Intercepts_stg2\r\n\r\n self.Cat_Coef = Cat_Coef\r\n self.Disp_Coef = Disp_Coef", "def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()", "def init_db(self):\n\n cursor = self._db_connection.cursor()\n\n cursor.execute('''\n CREATE TABLE IF NOT EXISTS users (\n uid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n username VARCHAR(255) UNIQUE NOT NULL,\n full_name VARCHAR(255),\n password VARCHAR(255) NOT NULL\n );\n \n CREATE TABLE IF NOT EXISTS access_control (\n access_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n uid INTEGER NOT NULL,\n privilege VARCHAR(255) NOT NULL,\n UNIQUE (uid, privilege),\n CONSTRAINT fk_associated_user FOREIGN KEY (uid) REFERENCES users (uid)\n );\n \n CREATE TABLE IF NOT EXISTS tokens (\n token_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n token_content VARCHAR(255) NOT NULL,\n expiration_datetime VARCHAR(255),\n uid INTEGER NOT NULL,\n CONSTRAINT fk_associated_user FOREIGN KEY (uid) REFERENCES users (uid)\n );\n \n CREATE TABLE IF NOT EXISTS courses (\n course_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n course_abbreviation VARCHAR(255) NOT NULL,\n course_name VARCHAR(255) NOT NULL,\n instructor_id INTEGER NOT NULL,\n time VARCHAR(255) NOT NULL,\n seats INTEGER NOT NULL,\n CONSTRAINT fk_instructors FOREIGN KEY (instructor_id) REFERENCES users (uid)\n );\n \n CREATE TABLE IF NOT EXISTS enrollment_records (\n enrollment_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n uid INTEGER NOT NULL,\n course_id INTEGER NOT NULL,\n grade NUMERIC NOT NULL DEFAULT 100.0,\n UNIQUE (uid, course_id),\n CONSTRAINT fk_associated_user FOREIGN KEY (uid) REFERENCES users (uid),\n CONSTRAINT fk_associated_course FOREIGN KEY (course_id) references courses (course_id)\n );\n ''')", "def populate(self):\n\n NUM_COUNTRIES = 2 # random.randint(1, 4)\n\n # find a suitable hex\n with Timer(\"Creating initial data\", debug=self.debug):\n\n for i in range(NUM_COUNTRIES):\n country, provinces, pops = create_country(self, self.map)\n country.determine_tax_policy()\n self.countries.append(country)", "def example_data():\n\n # In case this is run more than once, empty out existing data\n User.query.delete()\n Trip.query.delete()\n Entry.query.delete()\n Category.query.delete()\n Share.query.delete()\n\n # Add sample data\n user1 = User(email='[email protected]', password=bcrypt.hashpw('user1'.encode('utf8'), bcrypt.gensalt(9)), name='One')\n user2 = User(email='[email protected]', password=bcrypt.hashpw('user2'.encode('utf8'), bcrypt.gensalt(9)), name='Two')\n trip1 = Trip(location='Spain', 
date='08/09/2017', name='Abroad Trip', user_id=1)\n entry1 = Entry(trip_id=1, name='Tibidabo', address='08035 Barcelona, Spain', notes='Fun day trip!',\n type_id=1)\n category1 = Category(name='Attraction')\n share1 = Share(viewer_id=2, trip_id=1)\n\n db.session.add_all([user1, user2, trip1, entry1, category1, share1])\n db.session.commit()", "def setup(self):\n #print \"Creating test database...\"\n files = glob.glob(os.path.join(self.home_dir, 'sqlFiles', '*.sql'))\n for fls in files:\n loc = fls.rfind('/')\n #print(\" \" + fls.replace('.sql', '')[loc + 1:])\n flh = open(fls, 'r')\n curs = self.cursor()\n curs.executescript(flh.read())\n self.commit()\n curs.close()\n flh.close()\n for fls in ['INSERTS', 'TRIGGERS']:\n #print(fls)\n flh = open(os.path.join(self.home_dir, 'sqlFiles', fls), 'r')\n curs = self.cursor()\n curs.executescript(flh.read())\n self.commit()\n curs.close()\n flh.close()", "def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")", "def import_counties():\n\n query = 'INSERT INTO texas_counties(county, region) VALUES(%s,%s)'\n with persistence() as db:\n # create new cursor instance\n cursor = db.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n\n for council, counties in COUNCIL_DATA.items():\n for county in counties:\n cursor.execute(query, (county, council))\n db.commit()", "def setUpTestData(cls):\n countries = [\"MX\", \"CHL\", \"USA\", \"PER\", \"COL\"]\n slack_user_ids = [\"UP0918MAV\", \"UP0918MAV\", \"UP0918MAV\", None, None]\n cls.menu = Menu.objects.create(available_on=date.today())\n for count in range(5):\n user = User.objects.create(username=f\"johny.doe {count}\")\n Employee.objects.create(\n user=user, country=countries[count], slack_user_id=slack_user_ids[count]\n )", "def create_tables(self):\n\n # Uses methods from class DbAuth\n cursor = self.connect.create_cursor()\n cursor.execute(\"USE `dbPurBeurre`\")\n\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS `dbPurBeurre`.`Categories` (\"\n \" `num` INT UNSIGNED AUTO_INCREMENT,\"\n \" `id` VARCHAR(80) NOT NULL UNIQUE,\"\n \" `name` VARCHAR(80) NOT NULL,\"\n \" `url` VARCHAR(255) NOT NULL,\"\n \" `products` INT NULL,\"\n \" PRIMARY KEY (`num`))\"\n \" ENGINE = InnoDB\"\n )\n\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS `dbPurBeurre`.`Produits` (\"\n \" `num` INT UNSIGNED AUTO_INCREMENT,\"\n \" `id` VARCHAR(80) NOT NULL UNIQUE,\"\n \" `product_name` VARCHAR(80) NOT NULL,\"\n \" `nutrition_grade_fr` CHAR(1) NOT NULL,\"\n \" `brands` VARCHAR(80) NULL,\"\n \" `stores` VARCHAR(80) NOT NULL,\"\n \" `url` VARCHAR(255) NOT NULL,\"\n \" `watchlist` DATE NULL,\"\n \" PRIMARY KEY (`num`, `id`))\"\n \" ENGINE = InnoDB\"\n )\n\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS `dbPurBeurre`.`Asso_Prod_Cat` (\"\n \" `id_categories` VARCHAR(80) NOT NULL,\"\n \" `id_produits` VARCHAR(80) NOT NULL,\"\n \" 
PRIMARY KEY (`id_categories`, `id_produits`),\"\n \" CONSTRAINT `fk_id_categories`\"\n \" FOREIGN KEY (`id_categories`)\"\n \" REFERENCES `Categories` (`id`),\"\n \" CONSTRAINT `fk_id_produits`\"\n \" FOREIGN KEY (`id_produits`)\"\n \" REFERENCES `Produits` (`id`))\"\n \" ENGINE = InnoDB\"\n )" ]
[ "0.69008756", "0.66442764", "0.6618634", "0.65720314", "0.6367745", "0.63610566", "0.61599475", "0.61476094", "0.60915166", "0.60889375", "0.60289264", "0.59944206", "0.59331673", "0.5864972", "0.582192", "0.5799883", "0.5796334", "0.5792097", "0.5777224", "0.5760808", "0.5741017", "0.5714303", "0.5696844", "0.5693156", "0.5693014", "0.5668316", "0.5635157", "0.56118464", "0.55973214", "0.55897576", "0.5568381", "0.5547426", "0.5530872", "0.55300045", "0.55061775", "0.55056775", "0.55005866", "0.5480517", "0.54755205", "0.54691803", "0.5467102", "0.545828", "0.5447578", "0.54278415", "0.54269683", "0.5426604", "0.541382", "0.540727", "0.54033107", "0.540009", "0.5394304", "0.5392521", "0.5385077", "0.53843945", "0.536677", "0.5363409", "0.53619033", "0.53585196", "0.5355416", "0.5349354", "0.5348342", "0.53446865", "0.5343504", "0.53412694", "0.5336077", "0.5329909", "0.53268015", "0.53242385", "0.53196704", "0.5317065", "0.5316943", "0.53139275", "0.5312529", "0.5310118", "0.53096795", "0.530875", "0.5300851", "0.5295947", "0.5293937", "0.5289882", "0.5281536", "0.52807945", "0.52653956", "0.5263308", "0.52626914", "0.5255497", "0.52540356", "0.5251065", "0.5247085", "0.5246556", "0.52445936", "0.52440107", "0.52363193", "0.52333736", "0.52211064", "0.52153844", "0.5212769", "0.5208228", "0.5203642", "0.5203103" ]
0.71547
0
Populate locations data table.
def populate_locations(connection): print('Populating locations...') cursor = connection.cursor() with open(get_data_path('locations', 'locations.json'), 'r', encoding='utf-8') as json_file: locations = json.load(json_file) for station_id, location in locations.items(): cursor.execute(f'''SELECT id FROM watercourse_stations WHERE id = {station_id}''') if len(cursor.fetchall()): cursor.execute(f'''INSERT INTO locations(name, lat, lng) VALUES ('{location['name']}', {location['lat']}, {location['lng']})''') cursor.execute(f'''UPDATE watercourse_stations SET location_id = {cursor.lastrowid} WHERE id = {station_id}''')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_position_data(self):\n # populate 'Location' field randomly\n self.output['Location'] = np.random.choice(self.locations, self.obs)\n\n # clean up geodata data frame and create 'Position' attribute\n nc = self.geodata[['Lat', 'Lng', 'Elevation']].round(2)\n nc['Elevation'] = nc['Elevation'].astype(int)\n self.geodata['Position'] = nc.astype(\n str).apply(lambda x: ','.join(x), axis=1)\n self.geodata.drop(columns=['Lat', 'Lng', 'Elevation'], inplace=True)\n\n # update 'Position' column in output data frame\n left = self.output.set_index('Location') # set left index\n right = self.geodata.set_index('Location') # set right index\n self.output = left.loc[:, left.columns.union(right.columns)] # union\n self.output.update(right) # update self.output \"Position\" column\n self.output.reset_index(inplace=True)", "def add_locations(self):\n for _ in range(0, self.num_locations):\n detector_id = self.generate_id()\n detector_direction = self.generate_direction()\n detector_point = self.generate_point()\n self.dataset[detector_id] = (detector_direction, detector_point)\n assert len(self.dataset) == self.num_locations", "def get_all_locations(self):", "def load_data():\n if _LOCATIONS_BY_ID:\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID\n\n # We need to read the locations in order of country -> admin level 1 -> admin level 2 -> city.\n # This is so that the higher resolution locations can look up the lower resolution locations\n # that they belong to, and compute the necessary fields.\n countries_by_code = _load_country_data(_DATA_FILES['country'])\n admin1_by_code = _load_admin1_data(_DATA_FILES['admin_1'], countries_by_code)\n admin2_by_code = _load_admin2_data(_DATA_FILES['admin_2'], countries_by_code, admin1_by_code)\n _load_city_data(_DATA_FILES['city'], countries_by_code, admin1_by_code, admin2_by_code)\n _add_alternate_names(_DATA_FILES['alt_wiki_names'])\n _add_estimated_importances(_DATA_FILES['estimated_importance'])\n\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID", "def generate_test_locations(self):\n def generate_locations_for_organization(\n location_names, organization_name):\n item_dict = {}\n for name in location_names:\n item_dict['{}_{}'.format(name, organization_name)] = {\n 'name': name,\n 'organization': organization_name\n }\n return item_dict\n\n self.ls_o1_dict = \\\n generate_locations_for_organization(\n ['l1', 'l2', 'l3', 'l4', 'l5'], 'o1')\n\n self.ls_sub1_o1_dict = \\\n generate_locations_for_organization(\n ['l1', 'l2', 'l3', 'l4'], 'sub1_o1')\n\n self.ls_o2_dict = \\\n generate_locations_for_organization(['l1', 'l2', 'l3', 'l4'], 'o2')\n\n self.ls_sub1_o2_dict = \\\n generate_locations_for_organization(['l1', 'l2'], 'sub1_o2')\n\n # generate locations of org_3\n self.ls_o3_dict = \\\n generate_locations_for_organization(['l1', 'l2'], 'o3')\n\n # generate locations dictionary\n self.ls_dict = {\n **self.ls_o1_dict,\n **self.ls_sub1_o1_dict,\n **self.ls_o2_dict,\n **self.ls_sub1_o2_dict,\n **self.ls_o3_dict,\n }\n\n # generate locations in database\n self.locations = self.create_locations_from_data(\n self.ls_dict, self.orgs)", "def populate_stops(self):\n stops = self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)", "def create_locations(self, data):\n total_objects = len(data)\n parsed_objects = 0\n\n for object in data:\n # Get location title. 
'name' val is available to all objects, but Building 'title'\n # and RegionalCampus 'description' are more descriptive. Use them if available.\n if hasattr(object, 'title'):\n title = object['title']\n elif hasattr(object, 'description'):\n title = object['description']\n else:\n title = object['name']\n\n # Get other data.\n mapurl = object['profile_link']\n import_id = object['id']\n\n if title:\n # Check to see if the location name, map url are too long\n if len(title) > 256:\n title = title[0:256]\n if len(mapurl) > 400:\n mapurl = mapurl[0:400]\n if len(import_id) > 256:\n import_id = import_id[0:256]\n\n # See if an existing location exists with the current object ID.\n # Update the existing location if it exists; else, save the new location\n try:\n old_location = Location.objects.get(import_id=import_id)\n except Exception as e:\n logging.debug('No existing location found for %s: %s. Creating new location...' % (title, e))\n # No existing matches found, or the matches were duplicate\n new_location = Location(title=title, url=mapurl, room='', import_id=import_id, reviewed=True)\n try:\n new_location.save()\n except Exception as e:\n logging.error('Unable to save new location %s: %s' % (title, str(e)))\n else:\n parsed_objects += 1\n logging.info('New location %s created.' % title)\n else:\n logging.debug('Existing location %s found with Import ID %s. Updating existing location...' % (title, import_id))\n old_location.title = title\n old_location.url = mapurl\n old_location.room = ''\n old_location.reviewed = True\n try:\n old_location.save()\n except Exception as e:\n logging.error('Unable to save existing location %s: %s' % (title, str(e)))\n else:\n parsed_objects += 1\n logging.info('Existing location %s with Import ID %s updated.' % (title, import_id))\n\n logging.info('Done. %s of %s available objects successfully imported.' 
% (parsed_objects, total_objects))", "def add_locations(updatemedf):\n # List of unique identifiers to search in text\n uniquedf = pd.read_csv('uniquelist.csv', encoding=\"latin-1\")\n\n # Use lookup list to find text and look for institution matches, \n # once found append unique list of matching institution lookups\n df = add_univ_city(updatemedf, uniquedf)\n # Output this to a file for checking and adding more values as needed\n return df", "def __init__(self, num_locations):\n self.dataset = {}\n self.num_locations = num_locations\n self.add_locations()", "def test_bulk_locations(self):\n # do twice to check if it really updates\n lengths = []\n for i in range(2):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_201_CREATED, (\n responses.get(res.status_code, res.status_code), res.content)\n lengths.append(len(AdministrativeLocation.objects.all()))\n\n assert lengths[0] == lengths[1]\n\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations_duplicates)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def get_locations():\n\n dtype = {'id': str,\\\n 'lat': float,\n 'lon': float,\n 'address': str,\n }\n\n try: \n locations = pd.read_csv('Data/kulkijat-mittauspisteet.csv', sep=',', dtype=dtype)\n except FileNotFoundError:\n print('\\nMittauspisteet sisältävää tiedostoa kulkijat-mittauspisteet.csv ei löytynyt.\\n')\n locations = pd.DataFrame()\n\n return locations", "def _update_locations(self):\n raw_data = self._read_data()\n processed_data = self._process_data(raw_data) if self._process_data is not None else raw_data\n if processed_data:\n for k, v in processed_data.items():\n if k in self._locations.keys():\n self.predictors[k].update(*v)\n self._locations[k] = v\n for k, v in self._locations.items():\n if k not in processed_data:\n self._locations[k] = self.predictors[k].predict()\n else:\n self._locations = {k: self.predictors[k].predict() for k in self._locations.keys()}\n self._logger.debug(\"Locator updated locations\")", "def set_locations():\n STATUS['locations']['monster'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['monster'][1] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][1] = generate_random_coord(STATUS['grid_size'])", "def get_locations(self):\n try:\n output_json = {}\n total_locations = list(self.mongo_db_object.find_all(AppConfigurations.MONGO_DATABASE,\n AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME))\n output_json = total_locations\n return AppConstants.result_success_template(output_json)\n\n except Exception as e:\n print(\"Error while fetching the Location Data.\", str(e))", "def load(self, locations):\n try:\n assert isinstance(locations, list)\n super(Arrivals, self).load({'locIDs': locations})\n except AssertionError:\n print(\"Locations must be a list\")", "def _gen_locs_dbscan(sp, distance_metric, db):\n p = np.array([sp.geometry.x, sp.geometry.y]).transpose()\n if distance_metric == \"haversine\":\n p = np.deg2rad(p) # haversine distance metric assumes input is in rad\n labels = db.fit_predict(p)\n sp[\"location_id\"] = labels\n return sp", "def 
load(self):\n return list(self.obj.locations_set.all())", "def populate_contents(self):\n\n data_table = self.data_table\n world = self.world\n\n self.add_text_row('World Name', data_table.world_name_label.text())\n self.add_text_row('Coordinates', data_table.world_coords_label.text())\n self.add_text_row('World Type', data_table.world_type_label.text())\n if data_table.world_extra_label.text() != '':\n self.add_text_row('Extra Info', data_table.world_extra_label.text())\n self.add_text_row('Filename', world.base_filename)\n self.add_text_row('Size', '{}x{}'.format(*world.info.size))\n\n if len(world.info.dungeons) > 0:\n dungeons = self.add_text_row('Dungeons', '<br/>'.join(sorted(world.info.dungeons)))\n else:\n self.add_text_row('Dungeons', '-')\n\n if len(world.info.biomes) > 0:\n biomes = self.add_text_row('Biomes', '<br/>'.join(sorted(world.info.biomes)))\n else:\n self.add_text_row('Biomes', '-')", "def populate_cities():\n if City.query.filter_by(name=CITIES[0]).first():\n return\n\n for city in CITIES:\n _add_city(city)", "def create_data():\n # Locations\n data = {}\n num_vehicles = 20\n depot = 0\n locations = loc1\n demands = popn\n\n num_locations = len(locations)\n dist_matrix = {}\n\n for from_node in range(0,num_locations):\n dist_matrix[from_node] = {}\n\n for to_node in range(0,num_locations):\n dist_matrix[from_node][to_node] = (\n haversine(\n locations[from_node],[to_node])\n #locations[to_node],[from_node])\n \"\"\"\n data[\"distances\"] =dist_matrix\n data[\"num_locations\"] = len(dist_matrix)\n data[\"num_vehicles\"] = 6\n data[\"depot\"] = 0\n data[\"demands\"] = demands\n #data[\"vehicle_capacities\"] = capacities\n data[\"time_per_demand_unit\"] = 0.05\n return data\n \"\"\"\n return [ num_vehicles, depot, locations, dist_matrix]", "def create_locations_from_data(self, data, orgs):\n item_dict = {}\n for (item_name, data) in data.items():\n item_dict[item_name] = \\\n Location(\n name=data['name'],\n organization=orgs.get(data['organization']))\n item_dict[item_name].save()\n return item_dict", "def getAllLocation(table):\n\tlocs = []\n\n\tnum = len(table)\n\n\tfor i in range(num):\n\t\t# first field is the name\n\t\tloc = getLatAndLong(table[i][1])\n\n\t\tlocs.append(loc)\n\n\treturn locs", "def get_all_locations():\n rs = run_query('''select * from zlrz_office_location''')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))", "def locations_fixture(location):\n return [location, _create_location()]", "def build_polling_location_txt(self):\n self.base_df['address_line'] = self.base_df.apply(\n lambda row: self.get_address_line(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['directions'] = self.base_df.apply(\n lambda row: self.get_directions(), axis=1)\n #\n self.base_df['hours'] = self.base_df.apply(\n lambda row: self.get_hours(row['index'],row['start_time'], row['end_time']), axis=1)\n\n self.base_df['photo_uri'] = self.base_df.apply(\n lambda row: self.get_photo_uri(), axis=1)\n\n self.base_df['hours_open_id'] = self.base_df.apply(\n lambda row: self.create_hours_open_id(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['is_drop_box'] = self.base_df.apply(\n lambda row: self.is_drop_box(), axis=1)\n\n self.base_df['is_early_voting'] = self.base_df.apply(\n lambda row: self.is_early_voting(), axis=1)\n\n self.base_df['latitude'] = self.base_df.apply(\n lambda row: 
self.get_latitude(), axis=1)\n\n self.base_df['longitude'] = self.base_df.apply(\n lambda row: self.get_longitude(), axis=1)\n\n self.base_df['latlng_source'] = self.base_df.apply(\n lambda row: self.get_latlng_source(), axis=1)\n\n self.base_df['id'] = self.base_df.apply(\n lambda row: self.create_id(row['index'], row['ocd_division'],row['address1'], row['address2'],\n row['city'], row['state'], row['zip_code']), axis=1)\n\n return self.base_df", "def process_location(pid, data_source, out_loc, start_date, end_date, debug=False):\n\n #query = client.query_data_points(page_size=PAGE_SIZE, source=pid)\n query = try_query(pid)\n\n location_query = try_filter(query, pid, 'pdk-location', start_date, end_date)\n \"\"\" location_query = query.filter(source=pid, \n generator_identifier='pdk-location',\n created__gte=start_date,\n created__lte=end_date).order_by('created')\n \"\"\"\n tot_count = location_query.count()\n count = 0\n frac = int(tot_count / 100)\n\n loc_df = pd.DataFrame()\n for point in location_query:\n point_df = pd.DataFrame.from_dict(point).iloc[0].to_frame().transpose()\n metadata_df = pd.Series(point['passive-data-metadata']).to_frame().transpose()\n # TODO check if ignoring errors is safe\n metadata_df = metadata_df.drop(['latitude', 'longitude'], axis='columns', errors=\"ignore\")\n point_df.reset_index(inplace=True, drop=True)\n point_df = pd.concat([metadata_df, point_df], axis=1, sort=True)\n \n point_df.drop('passive-data-metadata', axis='columns', inplace=True)\n missing_cols = [col for col in loc_df.columns.values if col not in point_df.columns.values]\n \n if len(missing_cols) > 0 and loc_df.shape[0] > 0:\n for col in missing_cols:\n point_df[col] = np.nan\n point_df = point_df[loc_df.columns]\n loc_df = loc_df.append(point_df)\n count += 1\n if debug and (count % frac == 0):\n print(\"{0:.2f}% complete\".format(float(count)/float(tot_count)*100))\n\n loc_df['pid'] = pid \n loc_df['data_source'] = data_source\n print(loc_df.shape)\n \n pickle.dump(loc_df, open(\"{}/pdk-location/{}.df\".format(out_loc, pid), 'wb'), -1)", "def populate_stat(self, table):\n myrow = table.row\n # HDF5 doesn't handle unicode strings, so we need to convert to \n # *byte* strings, which we can put in the HDF5 file \n addy = numpy.zeros(len(self.address), \n dtype=(numpy.str, glob.nchar_address))\n for i in range(len(addy)):\n addy[i] = (self.address[i]).encode('utf8')\n\n myrow[\"address\"] = addy\n myrow[\"bike_stands\"] = self.bike_stands\n myrow[\"number\"] = self.number\n myrow[\"position\"] = self.position\n myrow.append()\n table.flush()", "def __init__(self,\n locations: List['LocationOutput']) -> None:\n self.locations = locations", "def test_locations(self):\n url = reverse(\"locations\", args=[00000])\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(isinstance(response.data, list))\n self.assertTrue(response.data) # list not empty\n\n location_data = response.data[0]\n data_keys = [\n \"title\",\n \"address\",\n \"address2\",\n \"city\",\n \"state\",\n \"postalCode\",\n \"distance\",\n \"hours\",\n \"phone\",\n \"geocode\",\n ]\n self.assertEqual(list(location_data.keys()), data_keys)", "def populate(self):\n\n NUM_COUNTRIES = 2 # random.randint(1, 4)\n\n # find a suitable hex\n with Timer(\"Creating initial data\", debug=self.debug):\n\n for i in range(NUM_COUNTRIES):\n country, provinces, pops = create_country(self, self.map)\n country.determine_tax_policy()\n self.countries.append(country)", "def populate(self):\n 
pass", "def _populate_output(self):\n self._store_atomic_queries_table()\n self._store_composite_queries_table()", "def __init__(self, obs, start_date, end_date, histdata, geodata):\n self.start_date = start_date\n self.end_date = end_date\n self.obs = obs\n self.geodata = pd.read_csv(get_datafile(geodata))\n self.histdata = aggregate_data(histdata)\n self.locations = self.histdata['Location'].unique().tolist()\n self.output = None\n self.cols = ['Location', 'Position', 'Local Time',\n 'Conditions', 'Temperature', 'Pressure', 'Humidity']", "def populateTable(self):\n\n output_list = self.output_ports.split(', ')\n\n for i in output_list:\n values = i.split('-')\n nextHopPort = values[0]\n linkCost = values[1]\n destId = values[2]\n learnedFrom = 0 # As it was learned from ConfigFile\n row = routing_row.RoutingRow(nextHopPort, destId, linkCost, destId, learnedFrom)\n self.addToRoutingTable(row)", "def extract_locations(self):\n default_pos_columns = common_cfg.coord_col_names\n if set(default_pos_columns).issubset(set(self._raw_data.columns)):\n print('Location data found')\n # check and drop units outside provided city boundary\n geometry = [shapely.geometry.Point(xy) for xy in zip(\n self._raw_data[default_pos_columns[0]], # Long\n self._raw_data[default_pos_columns[1]])] # Lat\n b_within_boundary = np.array(list(map(\n lambda p: p.within(self.model_city.convhull), geometry)))\n\n if not all(b_within_boundary):\n print('%s -- dropping %i units outside city.' %\n (self.servicetype,\n sum(np.bitwise_not(b_within_boundary))))\n self._raw_data = self._raw_data.iloc[\n b_within_boundary, :].reset_index()\n\n # store geolocations as geopy Point\n locations = [geopy.Point(yx) for yx in zip(\n self._raw_data[default_pos_columns[1]], # Lat\n self._raw_data[default_pos_columns[0]])] # Long\n\n propert_data = self._raw_data.drop(default_pos_columns, axis=1)\n\n else:\n raise NotImplementedError('Locations not found - not implemented!')\n\n return propert_data, locations", "def get_all_locations():\n with mysql.db_session(read_only=True) as session:\n locations = session.query(Location)\n\n if not locations:\n return response.create_not_found_response(message='No data found.')\n locations_list = [location.to_dict() for location in locations.all()]\n\n return response.Response(message=locations_list)", "def get_location_hash_table():\n return locationHashTable", "def locations(self):\n return self.data.get(\"locations\", [])", "def example_data():\n\n # In case this is run more than once, empty out existing data\n User.query.delete()\n Location.query.delete()\n\n # Add sample users and locations\n\n trinity = User(fname='Trinity', email='[email protected]',\n username='questionit', password='l0lagent')\n neo = User(fname='Neo', email='[email protected]',\n username='neo', password='l0lagent')\n\n tacorea = Location(yelp_id='tacorea-san-francisco', name='Tacorea',\n latitude='37.7749', longitude='122.3392',\n address='809 Bush St, San Francisco, CA 94108',\n yelp_url='[email protected]', pic='pic')\n\n db.session.add_all([trinity, neo, tacorea])\n db.session.commit()", "def _setData(self):\n self._data = [ item for ministry in self.query.all() \n for item in self._getItems(ministry) ]", "def _get_initial_location(self):\n for i in self.modelunctab.tableview.selectedIndexes():\n self.locid = i.row()\n break\n else:\n self.locid = 0\n\n self.locnaam = self.result_locations[self.locid]", "def _set_location(self):\n if self._report_key == ReportTypes.MHR_REGISTRATION:\n location = 
self._report_data.get('location')\n if location.get('lot') or location.get('parcel') or location.get('block') or location.get('districtLot') or\\\n location.get('partOf') or location.get('section') or location.get('township') or \\\n location.get('range') or location.get('meridian') or location.get('landDistrict') or \\\n location.get('plan'):\n location['hasLTSAInfo'] = True\n else:\n location['hasLTSAInfo'] = False\n if location.get('pidNumber'):\n pid = location.get('pidNumber')\n location['pidNumber'] = pid[0:3] + '-' + pid[3:6] + '-' + pid[6:]\n elif self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_BODY_REPORT):\n for detail in self._report_data['details']:\n location = detail.get('location')\n if location.get('lot') or location.get('parcel') or location.get('block') or \\\n location.get('districtLot') or location.get('partOf') or location.get('section') or \\\n location.get('township') or location.get('range') or location.get('meridian') or \\\n location.get('landDistrict') or location.get('plan'):\n location['hasLTSAInfo'] = True\n else:\n location['hasLTSAInfo'] = False\n if location.get('pidNumber'):\n pid = location.get('pidNumber')\n location['pidNumber'] = pid[0:3] + '-' + pid[3:6] + '-' + pid[6:]", "def _populate_zone_facts_table(self):\n census_fields = [\n 'poverty_rate', 'fraction_black', 'income_per_capita',\n 'labor_participation', 'fraction_foreign',\n 'fraction_single_mothers', 'acs_lower_rent_quartile',\n 'acs_median_rent', 'acs_upper_rent_quartile'\n ]\n\n zone_types = ['ward', 'neighborhood_cluster', 'census_tract']\n\n query_results = list()\n\n # populate columns accordingly for each zone_specific type\n for zone_type in zone_types:\n field_values = dict()\n\n # get field value for each zone_specific type\n for field in census_fields:\n result = self._census_with_weighting(data_id=field,\n grouping=zone_type)\n field_values[field] = result['items']\n\n zone_specifics = self._get_zone_specifics_for_zone_type(zone_type)\n\n # TODO: add aggregate for each zone_type into table\n for zone in zone_specifics:\n # get not None values so we can added to db\n columns = list()\n values = list()\n for field in census_fields:\n zone_value = field_values[field][zone]\n\n if zone_value is not None:\n columns.append(field)\n values.append(\"'\" + str(zone_value) + \"'\")\n\n # derive column and values strings needed for sql query\n columns = ', '.join(columns)\n columns = 'zone, ' + columns\n\n values = ', '.join(values)\n values = \"'\" + zone + \"', \" + values\n\n q = \"INSERT INTO zone_facts ({cols}) VALUES ({vals})\".format(\n cols=columns, vals=values)\n\n with self.engine.connect() as conn:\n result = conn.execute(q)\n query_results.append(result)\n\n return query_results", "def locate(self):\n \n #CONNECT TO API\n api = GoogleV3(api_key = self.google_key)\n\n #INITALIZE ARRAY\n array = []\n\n #START GEOCODING ADDRESSES\n for i in tqdm(range(len(self.df)), desc='Geocoding Addresses'):\n\n \n row = self.df.iloc[i]\n\n #GET ADDRESS VARIABLES\n st_name = row['street_name']\n st_number = row['house_number']\n city = row['city']\n state = row['state/province']\n listing_number = row['listing_number']\n zip = row['postal_code']\n\n\n #FORMAT ADDRESS FOR API\n full_address = str(\"{} {},{},{},{}\".format(st_number, st_name, city, state, zip))\n\n #TRY TO LOCATE WITH GOOGLE\n try:\n \n location = api.geocode(full_address, timeout=10)\n\n lat = location.latitude\n lon = location.longitude\n \n\n info = [lat,lon, listing_number]\n\n array.append(info)\n\n 
next \n\n #Go to next if you cant locate\n except:\n\n info = [0,0, listing_number]\n\n array.append(info)\n\n next\n\n #CONVERT SERIES TO DATAFRAME\n geo_data = pd.DataFrame(data = array, columns = ['lat', 'lon', 'listing_number'])\n \n #INNER JOIN DATA TO DATAFRAME\n self.df = pd.merge(self.df, geo_data, on= 'listing_number', how = 'inner')", "def test_list_zr_locations(self):\n pass", "def get_location_list(self) -> DBRecList:\n raise NotImplementedError('not implemented')", "def locations():\n sql = \"\"\"SELECT DISTINCT sample_location\n FROM barcodes.sample\n ORDER BY sample_location\"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql)\n return pm.sql.TRN.execute_fetchflatten()", "def places_process_rows(self):\n\n for index in range(len(self.table)):\n row_rdf = self.places_map_row_to_rdf(self.table.iloc[index])\n if row_rdf is not None:\n self.data += row_rdf", "def table_allocations(\n self, id_value, id_type, start_date=None, end_date=None, freq=None\n ):\n\n start_date, end_date, freq = self.get_time_parameters(\n start_date, end_date, freq\n )\n\n try:\n df = self.get_allocations(id_value, id_type, start_date, end_date, freq)\n\n if id_type == \"project\" and \"ALL\" not in str(id_value):\n # add the project's missing people allocation\n if freq == \"D\":\n df[\"UNALLOCATED\"] = self.wim.project_peoplereq[id_value]\n else:\n df[\"UNALLOCATED\"] = (\n self.wim.project_peoplereq[id_value].resample(freq).mean()\n )\n\n elif id_type == \"person\" and \"ALL\" not in str(id_value):\n # add the person's total project assignment to the data frame\n if freq == \"D\":\n df[\"TOTAL\"] = self.wim.people_totals[id_value]\n else:\n df[\"TOTAL\"] = self.wim.people_totals[id_value].resample(freq).mean()\n\n df = self.format_date_index(df, freq)\n\n return self.highlight_allocations(df)\n\n except ValueError as e:\n print(e)", "def PopulatePointToPointDistancesResultsTable(self, markups, table):\n headerList = [markups.GetNthFiducialLabel(i) for i in range(markups.GetNumberOfFiducials())]\n table.setRowCount(len(headerList))\n table.setColumnCount(len(headerList))\n table.setHorizontalHeaderLabels(headerList)\n table.setVerticalHeaderLabels(headerList)\n\n for point1 in range(markups.GetNumberOfFiducials()):\n for point2 in range(point1 + 1, markups.GetNumberOfFiducials()):\n point1_RAS = [0, 0, 0]\n point2_RAS = [0, 0, 0]\n markups.GetNthFiducialPosition(point1, point1_RAS)\n markups.GetNthFiducialPosition(point2, point2_RAS)\n distance = round(self.GetPointToPointDistance(point1_RAS, point2_RAS), 1)\n table.setItem(point1, point2, qt.QTableWidgetItem(str(distance)))\n\n table.show()\n table.resizeColumnsToContents()\n table.resizeRowsToContents()", "def location_fixture(self, db, scope=\"class\"):\n name = Name.objects.create(name=\"Event\", name_type=Name.SOFTWARE)\n loc1 = Location.objects.create(\n status=0,\n latitude=33.210241,\n longitude=-97.148857,\n belong_to_name=name)\n\n loc2 = Location.objects.create(\n status=0,\n latitude=33.210241,\n longitude=-97.148857,\n belong_to_name=name)\n\n loc3 = Location.objects.create(\n status=0,\n latitude=33.210241,\n longitude=-97.148857,\n belong_to_name=name)\n\n # Only return the ids, because the tests will\n # need to instantiate a new object to get the correct\n # statuses.\n return loc1.id, loc2.id, loc3.id", "def findAllLocations(cls):\r\n return cls.query.all()", "def meta_properties(self, date_col=\"Date\", type_col=\"Primary Type\", lat_col=\"Latitude\",\\\n lon_col=\"Longitude\", loc_col=\"Location\", out_fname=\"data_formated.csv\"):\n # 
implement keywords\n # would we have to deal w/ file w/o headers?\n data = pd.read_csv(self._path, usecols=[date_col, type_col, lat_col, lon_col, loc_col],\\\n parse_dates=[date_col], infer_datetime_format=True)\n data.sort_values(date_col, inplace=True)\n min_date = data.iloc[0][date_col]\n max_date = data.iloc[(data.shape[0]-1)][date_col]\n\n lat = []\n lon = []\n\n nulls = []\n for row in data.itertuples(index=True, name='Pandas'):\n index = (row.Index)\n # if lat, lon = nan, drop the row\n # update: confirmed that issue is with code, not with data; for some reason\n # csv is actually correctly grabbing location, there just legitimately are\n # entries w/o location data\n if pd.isnull(getattr(row, loc_col)):\n # print(\"row: {} got a {} for the 'Location' column with date: {}\".format(index, \\\n # getattr(row, loc_col), getattr(row, date_col)))\n if not pd.isnull(getattr(row, lat_col)) and not pd.isnull(getattr(row, lon_col)):\n lat.append(str(getattr(row, lat_col)))\n lon.append(str(getattr(row, lon_col)))\n if \",\" in data.loc[index, type_col]:\n data.loc[index, type_col] = data.loc[index, type_col].replace(\",\", \" \")\n print(\\\n \"Successfully extracted lat, lon from lat_col, lon_col for row: {}\".format(index))\n else:\n nulls.append((index, getattr(row, date_col)))\n data.drop(index, inplace=True)\n # print(\"No location data available for row: {} with date: {}\".format(index,\\\n # getattr(row, date_col)))\n else:\n loc = literal_eval(getattr(row, loc_col))\n lat.append(loc[0])\n lon.append(loc[1])\n if \",\" in data.loc[index, type_col]:\n data.loc[index, type_col] = data.loc[index, type_col].replace(\",\", \" \")\n\n data[\"Latitude\"] = lat\n data[\"Longitude\"] = lon\n data.drop(loc_col, axis=1, inplace=True)\n\n data.sort_values(\"Latitude\", inplace=True)\n min_lat = float(data.iloc[0][\"Latitude\"])\n max_lat = float(data.iloc[(data.shape[0]-1)][\"Latitude\"])\n\n data.sort_values(\"Longitude\", inplace=True)\n min_lon = float(data.iloc[0][\"Longitude\"])\n max_lon = float(data.iloc[(data.shape[0]-1)][\"Longitude\"])\n\n data.to_csv(self.__file_dir+'/'+out_fname, na_rep=\"\", header=False, index=False)\n\n attrs = {'min_date': min_date, 'max_date': max_date, \"min_lat\":min_lat,\\\n \"max_lat\":max_lat, \"min_lon\":min_lon, \"max_lon\":max_lon, \\\n \"dates\":pd.date_range(min_date, max_date), \"num_attributes\": data.shape[1],\\\n \"num_entries\":data.shape[0]}\n self._meta_dict = attrs\n self._meta_dict['df'] = data\n pickle.dump(data, open(CWD + \"/meta_dict.p\", \"wb\"))\n print(\"Num entries w/o location data: {}\".format(len(nulls)))\n pickle.dump(nulls, open(CWD + \"/nulls.p\", \"wb\"))\n\n # not include the formatted dataset?\n return attrs", "def upsert_location(self, location):", "def populate(self):\n\n self.create_index()\n self.check_type()\n self.create_mapping()\n\n f = open(self.csv_file, 'rU')\n\n # Read the first line for all the headers\n headers = f.readline().split(',')\n\n # Read the rest of the document\n rows = f.readlines()\n added_counter = 0\n\n actions = []\n for row in rows:\n fields = row.split(',')\n obj = {}\n for header in headers:\n # we call lower-case here because we were originally using\n # analyzed strings in elasticsearch (and they were\n # automatically converted). 
Code was built based on that so it's\n # easiest to convert for now\n try:\n obj[header.replace('\\n', '')] = float(fields[\n headers.index(header)].replace('\\n', '').lower())\n except ValueError:\n obj[header.replace('\\n', '')] = fields[\n headers.index(header)].replace('\\n', '').lower()\n # check afterwards to replace empty strings with None (which json.dumps hopefully writes to null)\n if obj[header.replace('\\n', '')] == '':\n obj[header.replace('\\n', '')] = None\n try:\n item = {\n '_index': self.es_main_index,\n '_type': self.es_main_type,\n '_source': obj\n }\n\n actions.append(item)\n\n added_counter += 1\n print('%s new records added' % added_counter,\n end='\\r')\n sys.stdout.flush()\n\n if added_counter % self.chunk_size == 0:\n helpers.bulk(self.es, actions)\n actions = []\n\n except ConnectionError:\n print('There was a connection error. Check your Elastic' +\n ' Search setting and make sure Elastic Search is ' +\n 'running.')\n return False\n\n # add the remaining items\n if actions:\n helpers.bulk(self.es, actions)\n\n print('The update is completed. %s new records were added.' %\n added_counter)", "def populate_dyn(self, table):\n myrow = table.row\n myrow[\"sample_time\"] = int(time.time() - glob.base_time)\n myrow[\"available_bike_stands\"] = self.available_bike_stands\n myrow[\"available_bikes\"] = self.available_bikes\n myrow[\"last_update\"] = self.last_update\n myrow[\"status\"] = self.status\n myrow.append()\n table.flush()", "def create_table(f, geoinfo):\n bounds_cols = xb_points + yb_points\n df = pd.read_csv(f, delimiter=\";\", index_col=\"INDEX_RC\")\n df[duration_name] = parse_duration_level(f)\n df = df.join(geoinfo[[\"X_CENT_GEO\", \"Y_CENT_GEO\", \"Col\", \"Row\"]])\n df = df.rename(columns={\"Col\": x, \"Row\": y, \"X_CENT_GEO\": lon, \"Y_CENT_GEO\": lat})\n return df", "def set_location(self, location_set):", "def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m", "def _setData(self):\n\n if not self.stationId:\n return\n \"\"\" \n # get the ressource url and adjust lat and lon from data portal\n query = sparqls.stationResource(self.stationId)\n key, val = RunSparql(query, 'array').run()\n if val: \n self.url = val[0][0]\n self.lat = float(val[0][2])\n self.lon = float(val[0][3])\n \"\"\"\n\n # it is possible, that a station id has multiple URI\n # ask for all URI\n query = sparqls.stationData(self.uri, 'all')\n data = RunSparql(query, 'pandas').run()\n\n if not data.empty:\n self._data = data\n else:\n self._data = 'no data available'\n\n # check if data is available and extract the 'unique' data products\n if isinstance(self._data, pd.DataFrame):\n p = self._data['specLabel'].unique()\n 
self._products = pd.DataFrame(p)\n\n # replace samplingheight=None with empty string\n self._data.samplingheight.replace(to_replace=[None], value=\"\", inplace=True)\n else:\n self._products = 'no data available'", "def locations(self):\r\n return Locations(self)", "def build(self):\n states = WOFRegion.query.filter(WOFRegion.country_iso=='US')\n\n logger.info('Indexing US states.')\n\n for row in tqdm(states):\n\n # Key -> id(s)\n for key in map(keyify, state_key_iter(row)):\n self.add_key(key, row.wof_id)\n\n # ID -> state\n self.add_location(row.wof_id, StateMatch(row))", "def _populate_output(self):\n self._store_query_percentiles_table()", "def construct_locations(path_to_shapes, path_to_land_eligibility_km2, path_to_hydro_capacities_mw,\n path_to_biofuel_potential_mwh, flat_roof_share, maximum_installable_power_density,\n scaling_factors, biofuel_efficiency, path_to_output_yaml, path_to_output_csv):\n locations = gpd.GeoDataFrame(\n gpd.read_file(path_to_shapes).set_index(\"id\")\n )\n locations = (\n locations\n .assign(centroid=locations.centroid.rename(\"centroid\"))\n .loc[:, [\"name\", \"centroid\"]]\n )\n capacities = _from_area_to_installed_capacity(\n land_eligibiligy_km2=pd.read_csv(path_to_land_eligibility_km2, index_col=0),\n flat_roof_share=flat_roof_share,\n maximum_installable_power_density=maximum_installable_power_density\n )\n hydro_capacities = pd.read_csv(path_to_hydro_capacities_mw, index_col=0)\n biofuel = pd.read_csv(path_to_biofuel_potential_mwh, index_col=0) * biofuel_efficiency\n locations = locations.merge(\n pd.concat([capacities, hydro_capacities, biofuel], axis=\"columns\", sort=True),\n how=\"left\",\n left_index=True,\n right_index=True,\n validate=\"one_to_one\"\n )\n locations = locations.assign(id=locations.index.str.replace(\".\", \"-\")).set_index(\"id\")\n\n env = jinja2.Environment()\n env.filters[\"unit\"] = filters.unit\n rendered = env.from_string(TEMPLATE).render(\n locations=locations,\n scaling_factors=scaling_factors\n )\n with open(path_to_output_yaml, \"w\") as result_file:\n result_file.write(rendered)\n locations.name.to_csv(path_to_output_csv, index=True, header=True)", "def get_geo_data(self):\n # Get all countries and create a dictionary by name\n countries_shp = shpreader.natural_earth(\n resolution='10m',\n category='cultural',\n name='admin_0_countries',\n )\n self.countries = list(shpreader.Reader(countries_shp).records())\n self.countries_by_name = {}\n self.countries_by_iso_a2 = {}\n for country in shpreader.Reader(countries_shp).records():\n self.countries_by_name[country.attributes['NAME_LONG']] = country\n self.countries_by_iso_a2[country.attributes['ISO_A2']] = country\n\n # Get all states and create a dictionary by name\n states_provinces_shp = shpreader.natural_earth(\n resolution='50m',\n category='cultural',\n name='admin_1_states_provinces',\n )\n# full_list = list(shpreader.Reader(states_provinces_shp).records())\n# self.states = [x for x in full_list if x.attributes['type_en'] == 'State']\n self.states = list(shpreader.Reader(states_provinces_shp).records())\n self.states_by_name = {}\n for state in self.states:\n self.states_by_name[state.attributes['name']] = state\n\n # Get all timezones and create a dictionary by name\n timezones_shp = shpreader.natural_earth(\n resolution='10m',\n category='cultural',\n name='time_zones',\n )\n self.timezones = list(shpreader.Reader(timezones_shp).records())\n self.timezones_by_name = {}\n for timezone in shpreader.Reader(timezones_shp).records():\n # Try to get the actual 
name. Something like `Europe/Berlin`\n timezone_name = timezone.attributes['tz_name1st']\n # If there is no name, we default to the utc offset name `-5` `+4.5`\n if timezone_name == '':\n timezone_name = timezone.attributes['name']\n\n if timezone_name not in self.timezones_by_name.keys():\n self.timezones_by_name[timezone_name] = timezone", "def load_geolocation_data():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lkdata\"]\n data = pd.DataFrame(list(lk_collection.find()))\n data = data[[\"fields\"]]\n data = pd.concat([pd.DataFrame(data), pd.DataFrame(list(data[\"fields\"]))], axis=1).drop(\"fields\", 1)\n data[\"cca_2\"] = pd.to_numeric(data[\"cca_2\"])\n return data", "def populate(self):\n raise NotImplementedError", "def build(self):\n allow_bare = AllowBareCityName(blocklist=self.bare_name_blocklist)\n\n iter_keys = CityKeyIter(allow_bare)\n\n # Deduped cities.\n cities = WOFLocality.clean_us_cities()\n\n logger.info('Indexing US cities.')\n\n for row in tqdm(cities):\n\n # Key -> id(s)\n for key in map(keyify, iter_keys(row)):\n self.add_key(key, row.wof_id)\n\n # ID -> city\n self.add_location(row.wof_id, CityMatch(row))", "def create_locations_in_numbers_section(self):\n section = self.sections['Locations in numbers:']\n column, row = Counter(), Counter()\n # first row:\n label = Label(section, text='Locations total:')\n label.grid(column=column.next(), row=row.next())\n label.bind('<Button-1>', partial(self.change_locations_filter, None))\n self.locations_counter = Entry(\n section, textvariable=self.locations_count, width=4,\n disabledbackground='white',\n disabledforeground='black', state=DISABLED, justify=CENTER\n ).grid(column=column.next(), row=row())\n for location, value in self.locations_by_type.items():\n if column() == 18:\n column.restart()\n row.next()\n text = plural(location.value, self.language)\n label = Label(section, text=f'{text.title()}:')\n label.grid(column=column.next(), row=row())\n label.bind('<Button-1>',\n partial(self.change_locations_filter, location))\n Entry(section, textvariable=value, width=4,\n disabledbackground='white',\n disabledforeground='black', state=DISABLED, justify=CENTER\n ).grid(column=column.next(), row=row())", "def get_data(self):\n data = load.loader.get_data_for_hotel(self.hotel_name, self.filter)\n\n self.data_items = []\n\n if data:\n for row in data:\n for col in row:\n self.data_items.append(col)\n self.row_number = str(self.get_row_number())\n\n self.hotel_full_data = self.hotel_name + ' ' + load.loader.get_hotel_address(self.hotel_name)", "def get_locations_by_ids(self, id_list):", "def location_duplicates():\n\n if not auth.s3_has_role(1):\n redirect(URL(r=request, c=\"default\", f=\"index\"))\n \n # @ToDo: Set this via the UI & pass in as a var\n dupe_distance = 50 # km\n\n # Shortcut\n locations = db.gis_location\n\n table_header = THEAD(TR(TH(T(\"Location 1\")),\n TH(T(\"Location 2\")),\n TH(T(\"Distance(Kms)\")),\n TH(T(\"Resolve\"))))\n\n # Calculate max possible combinations of records\n # To handle the AJAX requests by the dataTables jQuery plugin.\n totalLocations = db(locations.id > 0).count()\n\n item_list = []\n if request.vars.iDisplayStart:\n end = int(request.vars.iDisplayLength) + int(request.vars.iDisplayStart)\n locations = db((locations.id > 0) & \\\n (locations.deleted == False) & \\\n (locations.lat != None) & \\\n 
(locations.lon != None)).select(locations.id,\n locations.name,\n locations.level,\n locations.lat,\n locations.lon)\n # Calculate the Great Circle distance\n count = 0\n for oneLocation in locations[:len(locations) / 2]:\n for anotherLocation in locations[len(locations) / 2:]:\n if count > end and request.vars.max != \"undefined\":\n count = int(request.vars.max)\n break\n if oneLocation.id == anotherLocation.id:\n continue\n else:\n dist = gis.greatCircleDistance(oneLocation.lat,\n oneLocation.lon,\n anotherLocation.lat,\n anotherLocation.lon)\n if dist < dupe_distance:\n count = count + 1\n item_list.append([oneLocation.name,\n anotherLocation.name,\n dist,\n \"<a href=\\\"../gis/location_resolve?locID1=%i&locID2=%i\\\", class=\\\"action-btn\\\">Resolve</a>\" % (oneLocation.id, anotherLocation.id)\n ])\n else:\n continue\n\n item_list = item_list[int(request.vars.iDisplayStart):end]\n # Convert data to JSON\n result = []\n result.append({\n \"sEcho\" : request.vars.sEcho,\n \"iTotalRecords\" : count,\n \"iTotalDisplayRecords\" : count,\n \"aaData\" : item_list\n })\n output = json.dumps(result)\n # Remove unwanted brackets\n output = output[1:]\n output = output[:-1]\n return output\n else:\n # Don't load records except via dataTables (saves duplicate loading & less confusing for user)\n items = DIV((TABLE(table_header,\n TBODY(),\n _id=\"list\",\n _class=\"dataTable display\")))\n return(dict(items=items))", "def load_pneumonia_locations():\n pneumonia_locations = {}\n # load table\n\n with open(os.path.join(LABEL_PATHS), mode='r') as infile:\n # open reader\n reader = csv.reader(infile)\n # skip header\n next(reader, None)\n # loop through rows\n for rows in reader:\n # retrieve information\n filename = rows[0]\n location = rows[1:5]\n pneumonia = rows[5]\n # if row contains pneumonia add label to dictionary\n # which contains a list of pneumonia locations per filename\n if pneumonia == '1':\n # convert string to float to int\n location = [int(float(i)) for i in location]\n # save pneumonia location in dictionary\n if filename in pneumonia_locations:\n pneumonia_locations[filename].append(location)\n else:\n pneumonia_locations[filename] = [location]\n\n return pneumonia_locations", "def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()", "def _populate_df(self, df, objs,):\n for obj in objs:\n for prop in df.columns:\n df.loc[obj.name, prop] = getattr(obj, prop)", "async def synchronise_meta_data_table(\n location_id: LocationID, dry_run: bool = False, fire_and_forget: bool = False\n):", "def add_amenities(self): \n amenity_count = [self.search(lat, lon) for lat, lon in\n zip(self.df_ads['latitude'], self.df_ads['longitude'])]\n \n self.df_ads_mapdata = pd.concat(\n [self.df_ads.reset_index(drop=True), pd.DataFrame(amenity_count)], axis=1)\n\n assert len(self.df_ads_mapdata) == len(self.df_ads)", "def locations(request):\n locations = Location.objects.all()\n context = {'locations': locations}\n return render(request, 'std/Locations.html', context)", "def load_data(self):\n @Logger.runtime\n def process_coords():\n \"\"\"\n The placement of locations on our minimap is crucial. Panda3D objects however have a coordinate range from\n -1 to 1 on all axis, meaning that if we read a coordinate of a location from some image processing software\n by hand, we have to transform those coordinates into coordinates Panda would understand. 
This function does\n just that.\n :return: Normalized coordinates of location coordinates.\n \"\"\"\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed\n\n @Logger.runtime\n def process_texture():\n texture_path = Path(\"resource/textures/{}\".format(row[\"texture\"]))\n texture = self.loader.loadTexture(texture_path)\n return texture\n\n # the cylinder is loaded here but it does not yet show up, until it's specifically asked to\n self.scene_3d_model = self.loader.loadModel(self.PATHS[\"3D_SCENE_MODEL\"])\n\n try:\n with open(self.PATHS[\"LOCATIONS_DB\"], \"r\") as l_file:\n data = csv.DictReader(l_file, delimiter=\"|\")\n for row in data:\n id = int(row[\"id\"])\n x, y = process_coords()\n neighbors = [int(neighbor_id) for neighbor_id in row[\"neighbors\"].split(',')]\n texture = process_texture()\n location = Location(id, x, y, neighbors, texture)\n location.reparentTo(self.render2d)\n self.locations.append(location)\n Logger.log_info('The locations_db has been loaded')\n except:\n Logger.error('{} file not found!'.format(self.PATHS[\"LOCATIONS_DB\"]))\n\n self.active_location = self.locations[0]", "def _init_list(self, data: List[Any]):\n # Assume data is homogenous in regard to row type\n obj = data[0]\n column_names = self._column_name_getter(obj)\n column_values = self._column_value_getter(obj)\n\n # Map of source to destination column\n column_map = {}\n\n # Do not update columns if predefined\n add_columns = not bool(self._columns)\n\n for obj in data:\n row = [None] * len(self._columns)\n\n for column_src in column_names(obj):\n # Check if column has been added with different name\n column_dst = column_map.get(column_src, column_src)\n\n # Dictionaries and namedtuples can\n # contain unknown columns\n if column_dst not in self._columns:\n if not add_columns:\n continue\n\n col = self._add_column(column_dst)\n\n # Store map of source column name to created name\n column_dst = self._columns[col]\n column_map[column_src] = column_dst\n\n while len(row) < len(self._columns):\n row.append(None)\n\n col = self.column_location(column_dst)\n row[col] = column_values(obj, column_src)\n\n self._data.append(row)", "def populate_countries(self):\n # For each country in population.\n for name, pop in self.population.iterrows():\n p = pop['Population']\n # Get all relevant time series based on country name.\n c = self.raw_confirmed.loc[self.raw_confirmed['Country/Region'] == name].sum(numeric_only=True)\n d = self.raw_deceased.loc[self.raw_deceased['Country/Region'] == name].sum(numeric_only=True)\n r = self.raw_recovered.loc[self.raw_recovered['Country/Region'] == name].sum(numeric_only=True)\n # Create new country object.\n self.countries.append(country.Country(name, p, c, d, r))", "def describe_locations():\n pass", "def location():\n\n tablename = \"%s_%s\" % (module, resourcename)\n table = db[tablename]\n\n # Allow prep to pass vars back to the controller\n vars = {}\n\n # @ToDo: Clean up what needs to be done only for interactive views,\n # vs. what needs to be done generally. E.g. 
some tooltips are defined\n # for non-interactive.\n # Pre-processor\n def prep(r, vars):\n\n def get_location_info():\n query = (db.gis_location.id == r.id)\n return db(query).select(db.gis_location.lat,\n db.gis_location.lon,\n db.gis_location.level,\n limitby=(0, 1)).first()\n\n # Restrict access to Polygons to just MapAdmins\n if deployment_settings.get_security_map() and not s3_has_role(\"MapAdmin\"):\n table.code.writable = False\n if r.method == \"create\":\n table.code.readable = False\n table.gis_feature_type.writable = table.gis_feature_type.readable = False\n table.wkt.writable = table.wkt.readable = False\n elif r.interactive:\n table.code.comment = DIV(_class=\"tooltip\",\n _title=\"%s|%s\" % (T(\"Code\"),\n T(\"For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.\")))\n table.wkt.comment = DIV(_class=\"stickytip\",\n _title=\"WKT|%s %s%s %s%s\" % (T(\"The\"),\n \"<a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>\",\n T(\"Well-Known Text\"),\n \"</a>\",\n T(\"representation of the Polygon/Line.\")))\n\n if r.method == \"update\" and r.id:\n # We don't allow converting a location group to non-group and\n # vice versa. We also don't allow taking away all the members of\n # a group -- setting \"notnull\" gets the \"required\" * displayed.\n # Groups don't have parents. (This is all checked in onvalidation.)\n # NB r.id is None for update.url\n location = get_location_info()\n if location.level == \"GR\":\n table.level.writable = False\n table.parent.readable = table.parent.writable = False\n table.members.notnull = True\n # Record that this is a group location. Since we're setting\n # level to not writable, it won't be in either form.vars or\n # request.vars. Saving it while we have it avoids another\n # db access.\n response.s3.location_is_group = True\n else:\n table.members.writable = table.members.readable = False\n response.s3.location_is_group = False\n\n if r.interactive:\n if not \"group\" in r.request.vars:\n # Hide the Members List (a big download when many records are entered)\n table.members.writable = table.members.readable = False\n # Don't show street address, postcode for hierarchy on read or update.\n if r.method != \"create\" and r.id:\n try:\n location\n except:\n location = get_location_info()\n if location.level:\n table.addr_street.writable = table.addr_street.readable = False\n table.addr_postcode.writable = table.addr_postcode.readable = False\n\n # Options which are only required in interactive HTML views\n table.level.comment = DIV(_class=\"tooltip\",\n _title=\"%s|%s\" % (T(\"Level\"),\n T(\"If the location is a geographic area, then state at what level here.\")))\n parent_comment = DIV(_class=\"tooltip\",\n _title=\"%s|%s\" % (T(\"Parent\"),\n T(\"The Area which this Site is located within.\")))\n if r.representation == \"popup\":\n table.parent.comment = parent_comment\n else:\n # Include 'Add Location' button\n table.parent.comment = DIV(A(ADD_LOCATION,\n _class=\"colorbox\",\n _href=URL(r=request, c=\"gis\", f=\"location\",\n args=\"create\",\n vars=dict(format=\"popup\",\n child=\"parent\")),\n _target=\"top\",\n _title=ADD_LOCATION),\n parent_comment),\n table.osm_id.comment = DIV(_class=\"stickytip\",\n _title=\"OpenStreetMap ID|%s%s%s\" % (T(\"The\"),\n \" <a href='http://openstreetmap.org' target=_blank>OpenStreetMap</a> ID. 
\",\n T(\"If you know what the OSM ID of this location is then you can enter it here.\")))\n table.geonames_id.comment = DIV(_class=\"stickytip\",\n _title=\"Geonames ID|%s%s%s\" % (T(\"The\"),\n \" <a href='http://geonames.org' target=_blank>Geonames</a> ID. \",\n T(\"If you know what the Geonames ID of this location is then you can enter it here.\")))\n table.comments.comment = DIV(_class=\"tooltip\",\n _title=\"%s|%s\" % (T(\"Comments\"),\n T(\"Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.\")))\n\n if r.representation == \"iframe\":\n # De-duplicator needs to be able to access UUID fields\n table.uuid.readable = table.uuid.writable = True\n table.uuid.label = \"UUID\"\n table.uuid.comment = DIV(_class=\"stickytip\",\n _title=\"UUID|%s%s%s\" % (T(\"The\"),\n \" <a href='http://eden.sahanafoundation.org/wiki/UUID#Mapping' target=_blank>Universally Unique ID</a>. \",\n T(\"Suggest not changing this field unless you know what you are doing.\")))\n\n if r.method in (None, \"list\") and r.record is None:\n # List\n pass\n elif r.method in (\"delete\", \"search\"):\n pass\n else:\n # Add Map to allow locations to be found this way\n config = gis.get_config()\n lat = config.lat\n lon = config.lon\n zoom = config.zoom\n feature_queries = []\n\n if r.method == \"create\":\n add_feature = True\n add_feature_active = True\n else:\n if r.method == \"update\":\n add_feature = True\n add_feature_active = False\n else:\n # Read\n add_feature = False\n add_feature_active = False\n\n try:\n location\n except:\n location = get_location_info()\n if location and location.lat is not None and location.lon is not None:\n lat = location.lat\n lon = location.lon\n # Same as a single zoom on a cluster\n zoom = zoom + 2\n\n # @ToDo: Does map make sense if the user is updating a group?\n # If not, maybe leave it out. OTOH, might be nice to select\n # admin regions to include in the group by clicking on them in\n # the map. 
Would involve boundaries...\n _map = gis.show_map(lat = lat,\n lon = lon,\n zoom = zoom,\n feature_queries = feature_queries,\n add_feature = add_feature,\n add_feature_active = add_feature_active,\n toolbar = True,\n collapsed = True)\n\n # Pass the map back to the main controller\n vars.update(_map=_map)\n return True\n response.s3.prep = lambda r, vars=vars: prep(r, vars)\n\n # Options\n _vars = request.vars\n filters = []\n\n parent = _vars.get(\"parent_\", None)\n # Don't use 'parent' as the var name as otherwise it conflicts with the form's var of the same name & hence this will be triggered during form submission\n if parent:\n # We want to do case-insensitive searches\n # (default anyway on MySQL/SQLite, but not PostgreSQL)\n _parent = parent.lower()\n\n # Can't do this using a JOIN in DAL syntax\n # .belongs() not GAE-compatible!\n query = (db.gis_location.name.lower().like(_parent))\n filters.append((db.gis_location.parent.belongs(db(query).select(db.gis_location.id))))\n # ToDo: Make this recursive - want descendants not just direct children!\n # Use new gis.get_children() function\n\n # ToDo\n # bbox = _vars.get(\"bbox\", None):\n\n if filters:\n from operator import __and__\n response.s3.filter = reduce(__and__, filters)\n\n caller = _vars.get(\"caller\", None)\n if caller:\n # We've been called as a Popup\n if \"gis_location_parent\" in caller:\n # Hide unnecessary rows\n table.addr_street.readable = table.addr_street.writable = False\n else:\n parent = _vars.get(\"parent_\", None)\n # Don't use 'parent' as the var name as otherwise it conflicts with the form's var of the same name & hence this will be triggered during form submission\n if parent:\n table.parent.default = parent\n\n # Hide unnecessary rows\n table.level.readable = table.level.writable = False\n table.geonames_id.readable = table.geonames_id.writable = False\n table.osm_id.readable = table.osm_id.writable = False\n table.source.readable = table.source.writable = False\n table.url.readable = table.url.writable = False\n\n level = _vars.get(\"level\", None)\n if level:\n # We've been called from the Location Selector widget\n table.addr_street.readable = table.addr_street.writable = False\n\n output = s3_rest_controller(module, resourcename)\n\n _map = vars.get(\"_map\", None)\n if _map and isinstance(output, dict):\n output.update(_map=_map)\n\n return output", "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "def stats_data_by_location():\n by_city = db_session.query(Locations.city, func.count(MediaFiles.id)) \\\n .outerjoin(MediaFiles, MediaFiles.location_id == Locations.id) \\\n .group_by(Locations.city) \\\n .all()\n by_country = db_session.query(Locations.country, func.count(MediaFiles.id)) \\\n .outerjoin(MediaFiles, MediaFiles.location_id == Locations.id) \\\n .group_by(Locations.country) \\\n .all()\n data_by_city = [{'name': item[0].title(), 'y': item[1]} for item in by_city]\n data_by_country = [{'name': item[0].title(), 'y': item[1]} for item in by_country]\n return data_by_city, data_by_country", "def initialize(self) -> None:\n all_queries: List[Union[DataQueryInfo, MeasureQueryInfo]] = []\n entity_cells: List[DataCell] = []\n current_row_group = None\n\n # Loop over rows, columns\n for row_index, row in enumerate(self.rows):\n if isinstance(row, RowSeparator):\n current_row_group = row.name\n continue\n entity: Entity = row.entity\n if isinstance(entity, Entity):\n self.entity_map[entity.get_marquee_id()] = 
entity\n else:\n self.entity_map[''] = entity\n cells: List[DataCell] = []\n row_overrides = row.overrides\n\n for column_index, column in enumerate(self.columns):\n column_name = column.name\n column_processor = column.processor\n\n # Get all the data coordinate overrides and apply the processor override if it exists\n data_overrides, value_override, processor_override = _get_overrides(row_overrides, column_name)\n\n # Create the cell\n cell: DataCell = DataCell(column_name,\n column_processor,\n entity,\n data_overrides,\n column_index,\n row_index,\n current_row_group)\n\n if processor_override:\n # Check if there is a processor override and apply if so\n cell.processor = processor_override\n if value_override:\n cell.value = ProcessorResult(True, value_override.value)\n cell.updated_time = get_utc_now()\n elif isinstance(column_processor, EntityProcessor):\n # store these cells to fetch entity data during poll\n entity_cells.append(cell)\n elif isinstance(column_processor, CoordinateProcessor):\n # store these cells to fetch entity data during poll\n if len(data_overrides):\n # Get the last in the list if more than 1 override is given\n cell.processor.children['a'].set_dimensions(data_overrides[-1].dimensions)\n\n self._coord_processor_cells.append(cell)\n elif column_processor.measure_processor:\n all_queries.append(MeasureQueryInfo(attr='', entity=entity, processor=column_processor))\n else:\n # append the required queries to the map\n cell.build_cell_graph(all_queries, self.rdate_entity_map)\n\n cells.append(cell)\n\n self._cells.extend(cells)\n self.results.append(cells)\n\n self._data_queries = all_queries\n self._entity_cells = entity_cells\n self.is_initialized = True", "def append_locations(self, newlocs: List):\n self.locations.extend(newlocs)", "def build_localisations(csv_locations):\n # Open each of the files\n # Build the dictionaries by column\n localisations = []\n for csv_location in csv_locations:\n with open(csv_location) as csvfile:\n reader = csv.DictReader(csvfile)\n locs = {}\n for key in reader.fieldnames:\n locs[key] = []\n for row in reader:\n for key in reader.fieldnames:\n locs[key].append(row[key])\n\n localisations.append(locs)\n return localisations", "def load_store_location_data():\n try:\n store_location_data = []\n with open('store-locations.csv') as csvfile:\n reader = csv.DictReader(csvfile, dialect=csv.excel)\n for row in reader:\n store_location_data.append(dict(OrderedDict(row)))\n\n return store_location_data\n except IOError as error:\n print(\"I/O error({0}): {1}\".format(error.errno, error.strerror))\n return None", "def _populate_table(self):\n self._table.setSortingEnabled(False)\n self._table.setRowCount(len(self._module_names))\n for i, module_name in enumerate(self._module_names, 0):\n self._table.setItem(i, 0, QtWidgets.QTableWidgetItem(module_name))\n self._table.resizeRowsToContents()\n self._table.setSortingEnabled(True)", "async def test_get_location_data(self):\n for city_name in ['dublin', 'London', 'Copenhagen']:\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ))\n self.assertEqual(response.code, HTTPStatus.OK)\n self.check_city_response(response, city_name.lower())", "def location_ids(self, location_ids):\n\n self._location_ids = location_ids", "def insert_values():\n pass", "def run(self, locations):\n return locations", "def test_storage_locations_data(\n storage_locations,\n storage_service_id,\n storage_service_name,\n 
start_date,\n end_date,\n locations_count,\n location_1_aips_count,\n location_1_total_size,\n location_2_aips_count,\n location_2_total_size,\n):\n report = report_data.storage_locations(\n storage_service_id=storage_service_id, start_date=start_date, end_date=end_date\n )\n\n assert report[fields.FIELD_STORAGE_NAME] == storage_service_name\n\n locations = report[fields.FIELD_LOCATIONS]\n assert len(locations) == locations_count\n\n if locations_count < 1:\n return\n\n first_location = locations[0]\n # Account for sorting changes made possible by date filtering.\n if location_1_aips_count > location_2_aips_count:\n assert first_location[fields.FIELD_UUID] == STORAGE_LOCATION_1_UUID\n assert (\n first_location[fields.FIELD_STORAGE_LOCATION]\n == STORAGE_LOCATION_1_DESCRIPTION\n )\n assert first_location[fields.FIELD_AIPS] == location_1_aips_count\n assert first_location[fields.FIELD_SIZE] == location_1_total_size\n else:\n assert first_location[fields.FIELD_UUID] == STORAGE_LOCATION_2_UUID\n assert (\n first_location[fields.FIELD_STORAGE_LOCATION]\n == STORAGE_LOCATION_2_DESCRIPTION\n )\n assert first_location[fields.FIELD_AIPS] == location_2_aips_count\n assert first_location[fields.FIELD_SIZE] == location_2_total_size\n\n second_location = locations[1]\n # Account for sorting changes made possible by date filtering.\n if location_1_aips_count > location_2_aips_count:\n assert second_location[fields.FIELD_UUID] == STORAGE_LOCATION_2_UUID\n assert (\n second_location[fields.FIELD_STORAGE_LOCATION]\n == STORAGE_LOCATION_2_DESCRIPTION\n )\n assert second_location[fields.FIELD_AIPS] == location_2_aips_count\n assert second_location[fields.FIELD_SIZE] == location_2_total_size\n else:\n assert second_location[fields.FIELD_UUID] == STORAGE_LOCATION_1_UUID\n assert (\n second_location[fields.FIELD_STORAGE_LOCATION]\n == STORAGE_LOCATION_1_DESCRIPTION\n )\n assert second_location[fields.FIELD_AIPS] == location_1_aips_count\n assert second_location[fields.FIELD_SIZE] == location_1_total_size", "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()", "def fill_obs(self, observation_table, data_store):\n for obs in observation_table:\n events = data_store.obs(obs_id=obs['OBS_ID']).events\n\n # TODO: filter out (mask) possible sources in the data\n # for now, the observation table should not contain any\n # run at or near an existing source\n\n self.counts_cube.fill_events([events])\n self.livetime_cube.data += events.observation_live_time_duration", "def getData(locations, soup):\n chromeIndex = locations.index('On chromosome')\n positionIndex = locations.index('Chromosome position')\n if 'Summary' in locations:\n traitIndex = locations.index('Summary')\n else:\n traitIndex = locations.index('Trait')\n\n tvalues = soup('table')[1].find_all('td')\n chromosome = tvalues[chromeIndex].find_all('span')[0].get_text().split()[0]\n position = ''.join(tvalues[positionIndex].find_all('span')[0].get_text().split()[0].split(','))\n trait = tvalues[traitIndex].find_all('span')[0].get_text().split(u\"\\u00A0\")[0]\n result = {\n 'chromosome': chromosome,\n 'position': position,\n 'trait': trait\n }\n return result", "def location_links():\n\n try:\n record_id = request.args[0]\n except:\n item = s3xrc.xml.json_message(False, 400, \"Need to specify a record ID!\")\n raise HTTP(400, body=item)\n\n 
try:\n # Shortcut\n locations = db.gis_location\n\n deleted = (locations.deleted == False)\n query = (locations.id == record_id)\n query = deleted & query\n record = db(query).select(locations.id, limitby=(0, 1)).first().id\n except:\n item = s3xrc.xml.json_message(False, 404, \"Record not found!\")\n raise HTTP(404, body=item)\n\n # Find all tables which link to the Locations table\n # @ToDo: Replace with db.gis_location._referenced_by\n tables = shn_table_links(\"gis_location\")\n\n results = []\n for table in tables:\n for count in range(len(tables[table])):\n field = tables[str(db[table])][count]\n query = db[table][field] == record_id\n _results = db(query).select()\n module, resourcename = table.split(\"_\", 1)\n for result in _results:\n id = result.id\n # We currently have no easy way to get the default represent for a table!\n try:\n # Locations & Persons\n represent = eval(\"shn_%s_represent(id)\" % table)\n except:\n try:\n # Organisations\n represent = eval(\"shn_%s_represent(id)\" % resourcename)\n except:\n try:\n # Many tables have a Name field\n represent = (id and [db[table][id].name] or [\"None\"])[0]\n except:\n # Fallback\n represent = id\n results.append({\n \"module\" : module,\n \"resource\" : resourcename,\n \"id\" : id,\n \"represent\" : represent\n })\n\n output = json.dumps(results)\n return output" ]
[ "0.6048028", "0.6013519", "0.59421986", "0.5901815", "0.58811194", "0.5729807", "0.5710549", "0.56738675", "0.56535035", "0.554302", "0.5542974", "0.55208075", "0.5469795", "0.54697716", "0.5454492", "0.54353935", "0.5430382", "0.542676", "0.5424422", "0.5422587", "0.54031056", "0.537971", "0.5377886", "0.5370541", "0.53700954", "0.53338283", "0.5321584", "0.5311861", "0.5309858", "0.52906114", "0.527486", "0.52735823", "0.52685744", "0.5267895", "0.5265497", "0.5261579", "0.5250655", "0.5245967", "0.5245141", "0.5244098", "0.5243704", "0.52265453", "0.5211909", "0.520407", "0.5203368", "0.52015173", "0.5197916", "0.51911277", "0.51893866", "0.5180723", "0.5165066", "0.5147126", "0.5125742", "0.51232415", "0.51221913", "0.51174223", "0.5116288", "0.5114895", "0.511281", "0.51093024", "0.5104184", "0.51007247", "0.50942427", "0.5093816", "0.5088278", "0.50851643", "0.5075813", "0.5073255", "0.5067276", "0.5056906", "0.5052169", "0.5050736", "0.5048659", "0.50483847", "0.50458026", "0.5039353", "0.50260437", "0.5025127", "0.5020552", "0.5017206", "0.50163686", "0.5012871", "0.50111973", "0.50108093", "0.500284", "0.499562", "0.498823", "0.49820465", "0.49819988", "0.49792093", "0.49736127", "0.49655977", "0.4964482", "0.49619696", "0.4961521", "0.49591538", "0.49570963", "0.4954403", "0.4953951", "0.49488878" ]
0.6565873
0
Check if given forecast dictionary contains a numeric value with provided key.
def is_forecast_number(key, forecast):
    return key in forecast and type(forecast[key]) in [float, int]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moreThanOne(dict, key):\n\treturn key in dict and dict[key] > 0", "def _has_numeric_strict(self) -> bool:\n return bool({'i', 'f'} & self._data.keys())", "def contains_200(dictnr):\n contains = False\n for i in dictnr:\n if dictnr[i] == 200:\n contains = True\n print(contains)", "def is_key(number):\n res = False\n if is_integer(number):\n if int(number) > 0:\n res = True\n return res", "def _cast_to_number(self, key):\n q = DBSession.query(cast(self.db_value.value, Float)). \\\n join(self.db_tag). \\\n join(self.db_key). \\\n filter(self.db_key.key == key)\n try:\n q.all()\n return True\n except:\n return False", "def hasValue(self, key):\n return self.has_key('__' + key)", "def _has_science_data(data_dict, particle_class):\n return_value = False\n\n # Modified to make this check more efficient\n if len(particle_class.science_parameters) < len(data_dict):\n for key in particle_class.science_parameters:\n value = data_dict.get(key, None)\n if value is not None and not(isnan(float(value))):\n return_value = True\n break\n if particle_class._data_particle_type == 'glider_eng_telemetered':\n log.info(\"GliderParser._has_science_data failed: key=[%s] value=[%s]\", key, value)\n else:\n for key, value in data_dict.iteritems():\n if not (isnan(float(value))) and key in particle_class.science_parameters:\n return_value = True\n break\n if particle_class._data_particle_type == 'glider_eng_telemetered':\n log.info(\"GliderParser._has_science_data failed: key=[%s] value=[%s]\", key, value)\n\n return return_value", "def _has_numeric_or_bool(self) -> bool:\n dtypes: Set[str] = set(self._data.keys())\n return 'i' in dtypes or 'f' in dtypes or 'b' in dtypes", "def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False", "def is_number(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False", "def contains(self, key):\n if key in self.nums:\n return True\n return False", "def contains_double_count(key, value, similarity_dict):\n if value in similarity_dict.keys():\n if key in similarity_dict[value]:\n return True\n return False", "def data_dict_points(data_dict, feature):\n return len(filter(lambda k: isinstance(data_dict[k][feature],\n (int, float)), data_dict))", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_zero_dict( dict ):\n has_any_features = False\n for key in dict:\n has_any_features = has_any_features or dict[key]\n\n return not has_any_features", "def exists(field):\n try:\n float(field)\n return True\n except:\n return False", "def contains(self, key):\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tif self.ba[i] <= 0:\n\t\t\t\treturn False\n\t\treturn True", "def anyMoreThanOne(dict, keys):\n\tfor key in keys:\n\t\tif key in dict and dict[key] > 0:\n\t\t\treturn True\n\treturn False", "def values_are_pandas_numbers(values: List[str]):\n for v in values:\n try:\n float(v)\n except ValueError:\n return False\n return True", "def __contains__(self, k) :\n return k in self.precision()", "def contains_value(kv_json, value):\n if isinstance(kv_json, str):\n kv_dict = loads(kv_json)\n for key in kv_dict:\n if kv_dict[key] == value: # Found value in dictionary\n return True\n return False\n else:\n print(\"Provide A JSON Key Value String\")", "def is_float(possible_number):\r\n try:\r\n float(possible_number)\r\n return True\r\n except ValueError:\r\n return 
False", "def checkNaN(data_dict):\n for k, v in data_dict.iteritems():\n mark = True\n for feature, value in v.iteritems():\n if (value != 'NaN') and (feature != 'poi'):\n mark = False\n break\n if mark:\n print k\n print v['poi']", "def _check_feature_by_keys(service_data=None, service_keys=None, ns_data=None, ns_keys=None):\n\n if service_data and not isinstance(service_data, Exception) and service_keys:\n if _is_keyval_greater_than_value(service_data, service_keys):\n return True\n\n if ns_data and ns_keys:\n for ns, nsval in ns_data.iteritems():\n if not nsval or isinstance(nsval, Exception):\n continue\n if _is_keyval_greater_than_value(nsval, ns_keys):\n return True\n\n return False", "def contains_key(kv_json, key):\n if isinstance(kv_json, str):\n kv_dict = loads(kv_json)\n try:\n res = kv_dict[key]\n return True\n except KeyError:\n return False\n else:\n print(\"Provide A JSON Key Value String\")", "def validateNumber(key, value):\n if value is None or isinstance(value, (int, float)) and not isinstance(value, bool):\n return None\n else:\n return {'error': 'invalid value: %s (%s), valid values number/null' % (value, pythonTypeToJSONType(value))}", "def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False", "def haskey(featureVals, fkey):\n try:\n featureVals[fkey]\n except KeyError:\n return False\n\n #warn(HASKEYMSG % (fkey))\n return True", "def contains(self, key: int) -> bool:\n return self._find_key(key, find_empty=False) >= 0", "def is_numeric(value):\n return any([\n type(value) is str and value.isnumeric(),\n hasattr(value, 'is_integer') and value.is_integer(),\n type(value) is int,\n ])", "def has_value(cls, value):\n return bool(isinstance(value, numbers.Number) or isinstance(value, time) or \\\n isinstance(value, datetime) or value)", "def checkifnumber(self, test_string):\r\n try:\r\n float(test_string)\r\n return(True)\r\n except ValueError:\r\n return(False)", "def has_value(value):\n return IsDictContainingValue(wrap_matcher(value))", "def is_number(num):\n try:\n float(num)\n return True\n except ValueError:\n return False", "def is_number(number):\n try:\n float(number)\n return True\n except ValueError:\n return False", "def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False", "def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay", "def contains(self, key: int) -> bool:\n y = key % 80\n return key in self.arr[y]", "def operand_present(input_str): # HELPER\n try:\n float(input_str)\n return True\n except ValueError:\n return False", "def is_number(value):\n try:\n int(value)\n return True\n except (ValueError, TypeError):\n return False", "def func3(key):\n value = my_test_dict.get(key)\n if value is None:\n return False\n else:\n return True", "def has_valid_values(self):\n for element, value in self.items():\n if not (0 <= value <= 1):\n return False\n return True", "def is_number(n):\n\ttry:\n\t\tfloat(n)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY", "def check_for_float(check):", "def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False", "def 
check_for_float_and_int(check):", "def is_item_in_the_response(key, value, jsonResponse):\n flag = False\n for item in jsonResponse:\n if type(jsonResponse[item]) == int:\n if item == key and jsonResponse[item] == int(value):\n flag = True\n\n if type(jsonResponse[item]) == str:\n if item == key and jsonResponse[item] == str(value):\n flag = True\n\n if type(jsonResponse[item]) == bool:\n if item == key and jsonResponse[item] == bool(value):\n flag = True\n else:\n #log and error\n pass\n return flag", "def is_numeric(rows, col):\n return rows.dtypes.values[col] in numerics", "def CheckNumber(userInput):\n try:\n float(userInput)\n return True\n except(ValueError):\n return False", "def func4(key):\n return key in list(my_test_dict.keys())", "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))", "def rsi_trend_positive(rsi_dict):\n\n rsi_iterable = list(rsi_dict.items())\n\n # Only looking at the last 10 days of data.\n test_batch = [float(x[1]['RSI']) for x in rsi_iterable[0:10]]\n\n if sum(test_batch) / len(test_batch) < test_batch[0]:\n return True\n\n return False", "def valuecall(key, atom_dict):\n if key not in atom_dict:\n return 0\n else:\n return atom_dict[key]", "def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True", "def contains(self, val: float) -> bool:\n return self._check_lower(val) and self._check_upper(val)", "def find(self, value):\n for key in self.dict:\n if (value-key==key and self.dict[key]>=2) or (value-key!=key and value-key in self.dict):\n return True\n \n return False", "def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)", "def _is_key_value(data):\n if data is None:\n return False\n return all(x in data for x in ['key', 'value'])", "def is_float(word):\n try:\n float(word)\n return True\n except ValueError:\n return False", "def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n return False", "def contains(\n self, key: int | str | ir.IntegerValue | ir.StringValue\n ) -> ir.BooleanValue:\n return ops.MapContains(self, key).to_expr()", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def has_number(any_string):\n return any(char.isdigit() for char in any_string)", "def data_processing(data_dic: Dict[str, int]):\n\n sum_0 = 0\n for key, value in data_dic.items():\n if int(list(key)[0]) + int(list(key)[1]) == 0:\n sum_0 += value\n return sum_0 / shots", "def has(self, key: str) -> Any:\n return key in self.variables", "def is_number(text):\n return text.lower() in AVRO_NUMBERS", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def isNumber(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def get_serendipity_val(dic, key):\n # The key was in the training set\n try:\n return dic[key]\n # The key wasn't in the training set, then the serendipity is 1\n except KeyError:\n return 1.0", "def isNumericCategory(self, category):\r\n category_values = self.getCategoryValues(self.SampleIds, category)\r\n\r\n is_numeric = True\r\n for category_value in category_values:\r\n 
try:\r\n float(category_value)\r\n except ValueError:\r\n is_numeric = False\r\n return is_numeric", "def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def is_float(self, val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_float(self, value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def is_float(value):\n try:\n float(value)\n except ValueError:\n return False\n else:\n return True", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_num(var):\n try:\n int(var)\n return True\n except ValueError:\n return False", "def is_number(value):\n try:\n float(value.replace(',', ''))\n except ValueError:\n return False\n return True", "def _is_positive_float(item):\n if not isinstance(item, (int, float)):\n return False\n return item > 0", "def are_numeric(*values):\n\n for value in values:\n if not is_numeric(value):\n return False\n return True", "def is_floatable(value):\n\n try:\n float(value)\n return True\n except:\n return False", "def checkIndex(key):\n if not isinstance(key, (int, float)): raise TypeError\n if key<0: raise IndexError", "def isfloat(value):\r\n try:\r\n float(value)\r\n return True\r\n except ValueError:\r\n return False", "def is_float(val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def isFloat(value): \n try:\n float(value)\n return True\n except ValueError:\n return False", "def contains(self, key: int) -> bool:\n return key in self.data[key % self._MOD]", "def test_if_keys_or_values_in_result_dict_are_int(self):\n for key, value in add_expressions(1, 2, 8)(2, 3).items():\n self.assertIsInstance(key, int)\n self.assertIsInstance(value, int)", "def is_float(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def num_present(key, value, board):\n row, col = find_box(key) # checks for box\n for x in rows[row]:\n for y in cols[col]:\n if str(x + str(y)) != key and board[x + str(y)] == value:\n return True\n # checks for column\n for x in ROW:\n if str(x + str(key[1])) != key and board[x + str(key[1])] == value:\n return True\n # checks for row\n for i in COL:\n if str(key[0] + str(i)) != key and board[key[0] + str(i)] == value:\n return True\n return False", "def isSetNumericValue(self):\n return _libsbml.PossibleSpeciesFeatureValue_isSetNumericValue(self)", "def IsNumber(s):\n try:\n v = float(s)\n return True\n except ValueError:\n return False", "def isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def value_is_float_not_int(value):\n # this is klugy and only needed to display deprecation warnings\n try:\n int(value)\n return False\n except ValueError:\n try:\n float(value)\n return True\n except ValueError:\n return False\n except TypeError:\n return False", "def check_value(self, key: str, value: Any):\n # Check the value with a set of tests\n self._check_missing(key, value)\n self._check_allowed_values(key, value)\n self._check_data_type(key, value)\n self._check_value_range(key, value)", "def is_float(self, input):\n try:\n float(input)\n return True\n except ValueError:\n return False", "def numeric_check(param, name):\n\tif not isinstance(param, 
numbers.Number):\n\t\traise TypeError(\"Keyword arg '%s' must be a real number. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass" ]
[ "0.6130827", "0.6060219", "0.5976841", "0.5809231", "0.5789039", "0.5752602", "0.56971747", "0.5649147", "0.56462014", "0.5644481", "0.56440645", "0.5638638", "0.56340736", "0.560666", "0.5580076", "0.55792755", "0.55783355", "0.55679023", "0.55529094", "0.5530956", "0.5520167", "0.55132616", "0.5469083", "0.54569", "0.54385626", "0.54303396", "0.54266495", "0.5414168", "0.5401341", "0.54001594", "0.5395861", "0.5395672", "0.5385847", "0.537121", "0.53685254", "0.53639364", "0.53529483", "0.53525096", "0.5344761", "0.53408945", "0.5338632", "0.53309506", "0.5324901", "0.5310373", "0.5310211", "0.530742", "0.5304139", "0.53029907", "0.5301828", "0.5277144", "0.5269609", "0.5267187", "0.52419263", "0.52403307", "0.5234311", "0.5231246", "0.5223664", "0.52150464", "0.5215039", "0.5208167", "0.51986223", "0.5184296", "0.51812357", "0.5174362", "0.5171757", "0.5169111", "0.5168917", "0.51650286", "0.5162473", "0.5162473", "0.5162473", "0.5161335", "0.51582676", "0.51552325", "0.5153366", "0.51504785", "0.5146285", "0.5139923", "0.513948", "0.51351917", "0.5134928", "0.5134636", "0.5126503", "0.5119759", "0.51196444", "0.5116325", "0.51154494", "0.5115187", "0.51132905", "0.51070684", "0.5106551", "0.5105965", "0.5093936", "0.50907564", "0.50854605", "0.507771", "0.5077662", "0.50716114", "0.5070471", "0.5067336" ]
0.842359
0
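Aside: a minimal usage sketch of the is_forecast_number helper from the record above, showing how it behaves for a present numeric key, a present non-numeric key, and a missing key. The sample forecast values are invented for illustration and are not taken from the dataset.

def is_forecast_number(key, forecast):
    return key in forecast and type(forecast[key]) in [float, int]

# Hypothetical hourly forecast entry (values are made up).
hourly_forecast = {"temperature": 21.4, "uvIndex": 3, "summary": "Clear"}

print(is_forecast_number("temperature", hourly_forecast))  # True  (float value)
print(is_forecast_number("uvIndex", hourly_forecast))      # True  (int value)
print(is_forecast_number("summary", hourly_forecast))      # False (value is a string)
print(is_forecast_number("humidity", hourly_forecast))     # False (key not present)

Note that the check uses type(...) rather than isinstance(...), so boolean values (whose type is bool) are not counted as numeric.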
Populate weather data tables.
def populate_weather(connection): metadata = load_metadata('weather') cursor = connection.cursor() water_defs = get_water_definitions() # Check if tables are already populated. cursor.execute('SELECT count(*) FROM weather') weather_count = cursor.fetchone()[0] if weather_count: print('Weather tables already populated!') return print('WEATHER:') # Darksky data for dir_name, location in metadata.items(): print(f'\tPopulating weather: "{location["name"]}".') # Insert location. cursor.execute(f'''INSERT INTO locations(name, lat, lng) VALUES ('{location['name']}', {location['lat']}, {location['lng']})''') location_id = cursor.lastrowid # Set weather locations for watercourses/aquifers. for water_body in [d['body'] for d in water_defs.values()]: if water_body in location: cursor.execute(f'''UPDATE {water_body}s SET location_id = {location_id} WHERE name IN ('{"','".join(location[water_body])}')''') break dir_path = get_data_path('weather', 'raw', dir_name) for json_file_name in os.listdir(dir_path): json_path = os.path.join(dir_path, json_file_name) with open(json_path, 'r', encoding='utf-8') as json_file: print(f'\t\tPopulating year: {json_file_name[0:-5]}') year_forecasts = json.load(json_file) for date, date_forecast in year_forecasts.items(): hourly_forecasts = date_forecast['hourly'] if not hourly_forecasts: print(f'\t\tNo hourly forecasts for {date}!') continue daily_forecast = { 'location_id': location_id, 'time': date_forecast['time'], 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'], 'precipitation': 0, 'snow_accumulation': 0 } # List of value names with `avg`, `min` and `max` values value_names = { 'temperature': 'temperature', 'cloud_cover': 'cloudCover', 'dew_point': 'dewPoint', 'humidity': 'humidity', 'pressure': 'pressure', 'uv_index': 'uvIndex', 'precipitation_probability': 'precipProbability', 'precipitation_intensity': 'precipIntensity' } # Value name counters, which indicate how many times (out of 24) # certain value appears in hourly data. value_counts = {k: 0 for k in value_names.keys()} for value_name in value_names.keys(): daily_forecast[f'{value_name}_avg'] = 0.0 daily_forecast[f'{value_name}_min'] = float('inf') daily_forecast[f'{value_name}_max'] = float('-inf') # Calculate daily forecast values from hourly forecasts. for hourly_forecast in hourly_forecasts: for value_name in value_names.keys(): orig_value_name = value_names[value_name] if is_forecast_number(orig_value_name, hourly_forecast): daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name] daily_forecast[f'{value_name}_min'] = min( hourly_forecast[orig_value_name], daily_forecast[f'{value_name}_min'] ) daily_forecast[f'{value_name}_max'] = max( hourly_forecast[orig_value_name], daily_forecast[f'{value_name}_max'] ) value_counts[value_name] += 1 if is_forecast_number('precipAccumulation', hourly_forecast) \ and hourly_forecast['precipType'] == 'snow': daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation'] elif is_forecast_number('precipIntensity', hourly_forecast) \ and is_forecast_number('precipProbability', hourly_forecast): daily_forecast['precipitation'] += \ hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability'] for value_name, value_count in value_counts.items(): if value_count: # Calculate average. 
daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count else: # If value never appeared daily_forecast[f'{value_name}_avg'] = 'NULL' daily_forecast[f'{value_name}_min'] = 'NULL' daily_forecast[f'{value_name}_max'] = 'NULL' cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())}) VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''') # IOT data: for location in SETTINGS['weather_locations_iot']: print(f'\tPopulating weather: "{location["name"]}".') # Insert location. cursor.execute(f'''INSERT INTO locations(name, lat, lng) VALUES ('{location['name']}', {location['lat']}, {location['lng']})''') location_id = cursor.lastrowid # Set weather locations for watercourses/aquifers. for water_body in [d['body'] for d in water_defs.values()]: if water_body in location: cursor.execute(f'''UPDATE {water_body}s SET location_id = {location_id} WHERE name IN ('{"', '".join(location[water_body])}')''') # Set locations for all stations on given water body to match its location. cursor.execute(f'''SELECT id FROM {water_body}s WHERE location_id = {location_id}''') ids = [row[0] for row in cursor.fetchall()] if len(ids): cursor.execute(f'''UPDATE {water_body}_stations SET location_id = {location_id} WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''') break file_name = f'''{location['lat']}-{location['lng']}.json''' json_path = get_data_path('weather', 'raw', file_name) # If data file doesn't exist, download it first. if not os.path.isfile(json_path): with open(json_path, 'wb', encoding="utf-8") as file: file.write(read_from_url(location['url'], decode=False)) with open(json_path, 'r', encoding='utf-8') as json_file: row_names = { "Sun_duration": "sun_duration", "CloudCover": "cloud_cover_avg", "Percipitation": "precipitation", "New_snow_blanket": "snow_accumulation", "Snow_blanket": "snow_depth", "TemperatureAvg": "temperature_avg", "TemperatureMin": "temperature_min", "TemperatureMax": "temperature_max" } forecasts = json.load(json_file) for forecast in forecasts: f = {row_names[k]: forecast[k] for k in row_names.keys()} f['location_id'] = location_id f['time'] = round(forecast['LastUpdatedEpoch'] / 1000) cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())}) VALUES ({', '.join([str(v) for v in f.values()])})''')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')\n aquifer_count = cursor.fetchone()[0]\n\n if watercourse_count and aquifer_count:\n print('Water tables already populated!')\n return\n\n station_data = get_station_data()\n\n for archive in metadata.keys():\n print(f'{archive}-water:'.upper())\n water_body = get_water_definitions(archive)['body']\n\n # 1. Populate watercourses/aquifers:\n stations = {}\n for water_body_name in metadata[archive].keys():\n print(f'\\tPopulating {water_body}: \"{water_body_name}\"')\n cursor.execute(f'''INSERT INTO {water_body}s(location_id, name)\n VALUES (0, '{water_body_name}')''')\n water_body_id = cursor.lastrowid\n\n # 2. Populate watercourse_stations/aquifer_stations:\n for station_id in metadata[archive][water_body_name]['stations']:\n station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name'])\n\n if station_id in stations:\n # Prefer watercourses/aquifer with more stations\n current_len = len(metadata[archive][water_body_name]['stations'])\n previous_len = len(metadata[archive][stations[station_id]]['stations'])\n\n if current_len < previous_len:\n print(f'\\t\\tStation already exists: {station_id} - \"{station_name}\" (\"{water_body_name}\")')\n continue\n else:\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station: {station_id} - \"{station_name}\" from \"{stations[station_id]}\")')\n\n stations[station_id] = water_body_name\n print(f'\\t\\tPopulating station: {station_id} - \"{station_name}\"')\n\n # Insert station location if station data exists.\n location_id = 0\n station_row = station_data.query(f'ŠIFRA == \"{station_id}\"')\n if not station_row.empty:\n index = station_row.index[0]\n lat = station_row.at[index, 'LAT']\n lng = station_row.at[index, 'LON']\n if not np.isnan(lat) and not np.isnan(lng):\n name = f\"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})\"\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{name}', {lat}, {lng})''')\n location_id = cursor.lastrowid\n\n # Insert station.\n cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name)\n VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''')\n\n # 3. 
Populate watercourse_measurements/aquifer_measurements:\n if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'],\n station_id):\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station with useless data: {station_id} - \"{station_name}\"')\n\n # Remove empty watercourses/aquifers.\n cursor.execute(f'''SELECT w.id, w.name\n FROM {water_body}s w\n WHERE NOT EXISTS (\n SELECT s.id \n FROM {water_body}_stations s \n WHERE w.id = s.{water_body}_id\n )''')\n\n for row in cursor.fetchall():\n cursor.execute(f'''DELETE \n FROM {water_body}s\n WHERE id = {row[0]}''')\n print(f'\\tRemoved empty {water_body}: \"{row[1]}\"')", "def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m", "def init_and_update_observe_table(self):\n # print CHN_CITY_LIST_FILE\n location = ''\n id = ''\n f = open(CHN_CITY_LIST_FILE, 'r')\n for line in f.readlines():\n line_list = line.strip('\\n').split(':')\n location = line_list[0]\n id = line_list[1]\n pm = get_pm(location)\n # get current weather\n weather_dict = get_open_weather(id)\n if weather_dict not in ('', None, [], {}):\n if 'error' in pm or pm == False:\n weather_dict['aqi'] = '无数据'#'N/A'\n else:\n weather_dict['aqi'] = pm['quality'] + '(' + str(pm['aqi']) + ')'\n db_record = self.db.search_observe_record(str(id))\n # db_record = []\n now_date = get_local_format_time()\n if db_record != []:#update\n self.db.update_observe_data(weather_dict['ptime'],weather_dict['time'],now_date,weather_dict['WD'],weather_dict['WS'],weather_dict['SD'],weather_dict['weather'],weather_dict['img1'],weather_dict['img2'],weather_dict['temp'],weather_dict['temp1'],weather_dict['temp2'],weather_dict['aqi'],id)\n else:#insert\n self.db.insert_observe_data(id,weather_dict['city'],weather_dict['ptime'],weather_dict['time'],now_date,weather_dict['WD'],weather_dict['WS'],weather_dict['SD'],weather_dict['weather'],weather_dict['img1'],weather_dict['img2'],weather_dict['temp'],weather_dict['temp1'],weather_dict['temp2'],weather_dict['aqi'])\n f.close()\n return True", "def insert_humans_staging(self):\n for year in range(1880, CURRENT_YEAR):\n self.load_wikidata(\"humans\", HUMANS_BY_YEAR_SPARQL_QUERY, INSERT_HUMAN_SQL_QUERY,\n INSERT_HUMAN_MAP_COLUMNS, year=year)", "def populate_database(telescope_name, instrument_name):\n telescope = Telescope.objects.create(\n name=telescope_name, latitude=25.0, longitude=45.0)\n instrument = Instrument.objects.create(\n name=instrument_name, telescope=telescope)\n for year_int in (2012, 2013):\n for month_int in range(1, 13):\n for night_int in (1, monthrange(year_int, month_int)[1]):\n ut_date = 
date(year_int, month_int, night_int)\n night = Night.objects.create(\n ut_date=ut_date, instrument=instrument, observers='Smith')\n Exposure.objects.create(\n night=night, run_number=1, ut_start=time(10, 0, 0),\n exposed=20.0, ra=60.0, dec=30.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=2, ut_start=time(11, 0, 0),\n exposed=30.0, ra=90.0, dec=0.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=3, ut_start=time(12, 0, 0),\n exposed=40.0, ra=120.0, dec=-30.0, object_exp=False)", "def get_data(table_name, end, num, start=None):\n if start == None:\n if table_name == \"days\": start = end - timedelta(days=num-1) \n if table_name == \"weeks\": start = end - timedelta(weeks=num-1) \n if table_name == \"months\": start = end - relativedelta(months=+num-1) \n if table_name == \"years\": start = end - relativedelta(years=+num-1) \n else: \n start = days.get_entry(table_name, start).date\n \n dates = []\n data = []\n weather = []\n density = []\n \n while start <= end:\n entry = days.get_entry(table_name, start)\n data.append(entry.sentiment)\n \n if table_name == \"days\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(days=1)\n if table_name == \"weeks\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(weeks=1) \n if table_name == \"months\": \n dates.append(entry.date.strftime(\"%B %Y\"))\n start = start + relativedelta(months=+1) \n if table_name == \"years\": \n dates.append(entry.date.strftime(\"%Y\"))\n start = start + relativedelta(years=+1) \n\n # 7/15/15 is the last entry in the current weather dictionary\n num_days = (min(start, date(2015,7,15)) - entry.date).days\n temp = {entry.date + timedelta(days=i): weather_dict[entry.date + timedelta(days=i)] for i in range(num_days)}\n weather.append(float(sum(temp.values()))/float(len(temp)))\n\n if density_dict != None:\n d = max(entry.date, date(2014,7,1))\n num_days = (min(start, date(2015,7,28)) - d).days\n rho = {d + timedelta(days=i): density_dict[d + timedelta(days=i)] for i in range(num_days)}\n density.append(float(sum(rho.values()))/float(len(rho)))\n\n return dates, data, weather, density", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "def sql_functions(cities, weather):\n con = lite.connect(\"sql_database.db\")\n tables_tuple = (\"cities\", \"weather\")\n with con:\n #Generate tables in database:\n cur = con.cursor() #Get cursor object\n for table in tables_tuple:\n cur.execute(\"DROP TABLE IF EXISTS {0}\".format(table)) #Drop tables if they already exist.\n cur.execute(\"CREATE TABLE cities (name text, state text)\")\n cur.execute(\"CREATE TABLE weather (city text, year integer, warm_month text, cold_month text, average_high integer)\")\n #Populate tables in database:\n cur.executemany(\"INSERT INTO cities VALUES (?,?)\", cities)\n cur.executemany(\"INSERT INTO weather VALUES (?,?,?,?,?)\", weather)\n #Retrieve data from database:\n cur.execute(\"SELECT * FROM cities INNER JOIN weather ON city = name\")\n rows = cur.fetchall()\n cols = [desc[0] for desc in cur.description]\n output_dataframe = pd.DataFrame(rows, columns = cols)\n \n return output_dataframe", "def create_tables(cxn):\n\tcursor = cxn.cursor()\n\tcursor.execute(\"DROP TABLE IF EXISTS WEATHER\")\n\tcursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS 
WEATHER(\n\t\tstate varchar(3),\n\t\tcity varchar (15),\n\t\tobs_date varchar(12),\n\t\thour int,\n\t\tminute int,\n\t\tcurr_temp float,\n\t\tunique(state, city, obs_date, hour, minute)\n\t\t)\"\"\")\n\tcursor.close()", "def create_db(temp: list, rain: list, humidity: list, wind: list) -> dict:\r\n weather = {}\r\n for i in range(len(temp)):\r\n weather[i+1] = [temp[i], rain[i], humidity[i], wind[i]]\r\n return weather", "def _setupWeather(self, w, config):\n wnames = ('cloud', 'seeing')\n if w not in wnames:\n raise Exception('w should be one of %s' %(wnames))\n filename = config['%s_datafile' %(w)]\n file = open(filename, 'r')\n # Also assume flat file contains only date / value in a space or tab separated file. \n self.dates[w] = []\n self.weather[w] = []\n # Read the data file.\n print '# Reading weather data file %s' %(filename)\n for line in file:\n if line.startswith('#') | line.startswith('!'):\n continue\n self.dates[w].append(line.split()[0])\n self.weather[w].append(line.split()[1])\n file.close()\n self.dates[w] = numpy.array(self.dates[w], float)\n self.weather[w] = numpy.array(self.weather[w], float)\n # Check the total amount of data (mostly for user awareness):\n print '# Read %d weather values from %s file. ' %(len(self.weather[w]), filename)\n # Check that weather data is monotonically increasing in time. \n if not(numpy.all(numpy.diff(self.dates[w]))):\n order = self.dates[w].argsort()\n self.weather[w] = self.weather[w][order]\n self.dates[w] = self.dates[w][order]\n # Get the total length of time included in this (seeing/cloud) file,\n # so that we can determine a wrap-around date if we need that.\n self.maxtime[w] = self.dates[w].max()\n return", "def set_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tweets(\n id INTEGER PRIMARY KEY,\n tweet_id INTEGER,\n insert_date TEXT,\n created_at TEXT,\n hashtag TEXT)\n \"\"\")\n\n conn.execute(\"\"\"CREATE TABLE tweet_peaks(\n peak_datetime TEXT NOT NULL,\n hashtag TEXT NOT NULL,\n time_frame TEXT,\n mean REAL,\n std REAL,\n sensibility REAL,\n freq_limit REAL,\n qt_tweets INTEGER,\n id TEXT PRIMARY KEY,\n probability REAL);\n \"\"\")", "def load_up_initial_db(self, date_dict):\n df_tot = []\n for chunk in pd.read_sql_table(self.table, self.disk_engine, chunksize=10000, parse_dates=date_dict):\n df_tot.append(chunk)\n self.df = pd.concat(df_tot)", "def import_weather(keys):\n # imports weather and cleans\n df_all_weather = get_weather_as_df(keys)\n return clean_weather_df(df_all_weather)", "def update_weather(location_request, db):\n with open(expanduser(\"~/bin/my_utilities/config/darksky-key\")) as f:\n ds_key = f.readline().strip()\n current = []\n current_day = 0\n with forecast(ds_key, *location_request, units=\"uk2\") as location:\n raw = location['hourly']['data'][0]\n current.append(datetime.datetime.now().hour)\n current.append(day_relative_to_absolute(current_day))\n current.append(raw[\"temperature\"])\n current.append(raw[\"apparentTemperature\"])\n current.append(raw[\"precipIntensity\"])\n current.append(raw[\"precipProbability\"] * 100)\n current.append(raw[\"humidity\"] * 100)\n current.append(raw[\"dewPoint\"])\n current.append(raw[\"windSpeed\"])\n current.append(raw[\"windBearing\"])\n current.append(raw[\"windGust\"])\n current.append(raw[\"pressure\"])\n current.append(raw[\"cloudCover\"] * 100)\n current.append(raw[\"uvIndex\"])\n current.append(raw[\"visibility\"])\n current = format_list_for_db(current)\n\n columns = [\"hour\", 
\"day\", \"temp\", \"apptemp\", \"precipint\", \"precipprob\",\n \"humidity\", \"dewpoint\", \"windspeed\", \"windbearing\",\n \"windgust\", \"pressure\", \"cloudcover\", \"uvindex\", \"visibility\"]\n columns = format_list_for_db(columns)\n statement = f\"INSERT INTO WEATHER {columns} VALUES {current}\"\n print(statement)\n cursor = db.cursor()\n cursor.execute(statement)\n cursor.close()", "def compute_aggregate_weather_data():\n\n # get a list of all the csv files names in the 'weather_data' directory\n files = get_all_csv_files_in_directory('weather_data')\n\n # Todo: if the number of csv files doesn't match the expected value, unzip remaining using the 'os' module\n\n if len(files) == 0:\n\n # Unzip all files in current directory and subdirectories\n print \"unzipping weather files...\"\n os.system(\"unzip 'weather_data/*.zip' -d weather_data\")\n\n\n # Try again to get files\n files = get_all_csv_files_in_directory('weather_data')\n\n # Throw exception if still missing csv files\n if len(files) == 0:\n raise ValueError(\"Missing weather data in csv format in the 'weather_data' directory\")\n\n # convert the list of csv file names to a list of corresponding DataFrames\n dallas_files = filter(lambda file_name : \"KDAL\" in file_name, files)\n houston_files = filter(lambda file_name : \"KHOU\" in file_name, files)\n san_antonio_files = filter(lambda file_name : \"KSAT\" in file_name, files)\n\n print \"Retrieved weather data files...\"\n print \"\\t# of Dallas weather files found: \", len(dallas_files)\n print \"\\t# of Houston weather files found: \", len(houston_files)\n print \"\\t# of San Antonio weather files found: \", len(san_antonio_files)\n\n dallas_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), dallas_files)\n houston_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), houston_files)\n san_antonio_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), san_antonio_files)\n\n dallas_df = pd.concat(dallas_dfs)\n houston_df = pd.concat(houston_dfs)\n san_antonio_df = pd.concat(san_antonio_dfs)\n\n print \"Aggregating all of the weather data...\"\n # fold the list of data frames into a single data frame\n aggregate_df = reduce(lambda df1, df2: pd.merge(df1, df2, on=\"Date\", how=\"outer\"), [dallas_df, houston_df, san_antonio_df]).sort_values(\"Date\")\n\n return aggregate_df", "def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()", "def populate_stops(self):\n stops = self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 
'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def collect(self, start_date=None, end_date=None):\n if start_date is None:\n start_date = self.default_start\n if end_date is None:\n end_date = self.default_end\n\n cur = self.conn.cursor()\n\n # Maximum return is 1000 entries\n num_days = 1000 // len(self.stations)\n # Maximum date-range is 1 year\n if num_days > 365:\n num_days = 365\n\n for interval in netzero.util.time_intervals(\n start_date, end_date, days=num_days\n ):\n netzero.util.print_status(\n \"Weather\",\n \"Collecting: {} to {}\".format(\n interval[0].strftime(\"%Y-%m-%d\"), interval[1].strftime(\"%Y-%m-%d\")\n ),\n )\n\n # TODO -- REMOVE ASSUMPTION THAT LEN(DATA) < LIMIT\n raw_data = self.query_api(interval[0], interval[1])\n\n if raw_data is None:\n print(\"ERROR QUERYING API\") # TODO exception here?\n continue\n\n for entry in raw_data.get(\"results\", []):\n # Insert the weather data to the table, to be averaged later\n date = datetime.datetime.strptime(\n entry[\"date\"], \"%Y-%m-%dT%H:%M:%S\"\n ).date()\n value = entry[\"value\"]\n station = entry[\"station\"]\n\n cur.execute(\n \"INSERT OR IGNORE INTO weather VALUES (?, ?, ?)\", (date, value, station)\n )\n\n self.conn.commit()\n\n cur.close()\n\n netzero.util.print_status(\"Weather\", \"Complete\", newline=True)", "def setUpClass(cls):\n dt_index = pd.date_range(start=datetime(2019, 1, 1, 0, 1), periods=15,\n freq='1Min')\n\n # Create a temperature array with an average of 2.\n temp = [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]\n\n # Create ghi array with an average of 3.\n ghi = [2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4]\n\n # Create DataFrame.\n cls.weather_data = pd.DataFrame({'temperature': temp, 'ghi': ghi},\n index=dt_index)\n\n # Create expected data.\n dt_index_2 = pd.date_range(start=datetime(2019, 1, 1, 0, 15), periods=1,\n freq='15Min')\n cls.expected_data = pd.DataFrame({'temperature': [2], 'ghi': [3]},\n index=dt_index_2)", "def read_weather(self):\n print \"Reading weather data from file\",self.datafile\n tab = ascii.read(self.datafile)\n \n # Fix 'T' values in precipitation column, which represent tiny\n # amounts of rain (not measurable)\n TINY_VALUE = '.005' # 0.005 is half the smallest measurable value\n rain = tab['PrecipitationIn']\n wbad = (rain == 'T')\n rain[wbad] = TINY_VALUE\n rain = numpy.array(rain).astype(\"float\")\n\n # Replace string version of precip with float version\n tab['PrecipIn'] = rain\n tab.remove_column('PrecipitationIn')\n\n self.table = tab", "def update(self):\n if self.last_update and (\n self.last_update + timedelta(hours=1)\n > datetime.utcnow().replace(tzinfo=dt_util.UTC)\n ):\n return # Not time to update yet; data is only hourly\n\n for row in self.current_observations():\n if row.get(\"Station\") == self._station_id:\n api_fields = {\n col_heading: (standard_name, dtype)\n for standard_name, (\n _,\n _,\n _,\n col_heading,\n dtype,\n ) in SENSOR_TYPES.items()\n }\n self.data = {\n api_fields.get(col_heading)[0]: api_fields.get(col_heading)[1](\n v.replace(\",\", \".\")\n )\n for col_heading, v in row.items()\n if col_heading in api_fields and v\n }\n break\n else:\n raise ValueError(f\"No weather data for station {self._station_id}\")", "def create_table(self):\n # Connect to database\n conn = sqlite3.connect(self)\n # Create a cursor\n c = conn.cursor()\n\n # Create a Table\n c.execute(\"\"\"CREATE TABLE weather (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n sensor text,\n location text,\n temperature real,\n description text,\n 
time text\n )\"\"\")\n # Commit our command\n conn.commit()\n # Close our connection\n conn.close()", "def init_datasets(self, dataset_names, columns):\n for dataset_name in dataset_names:\n hdf5_dataset_name = self.schema.get(dataset_name)\n if hdf5_dataset_name is None:\n warnings.warn(\"Skipping %s (not in schema)\" % dataset_name)\n else:\n self[dataset_name] = tokio.timeseries.TimeSeries(dataset_name=hdf5_dataset_name,\n start=self.query_start,\n end=self.query_end_plusplus,\n timestep=self.timestep,\n num_columns=len(columns),\n column_names=columns,\n sort_hex=self.sort_hex)", "def populate(self):\n\n NUM_COUNTRIES = 2 # random.randint(1, 4)\n\n # find a suitable hex\n with Timer(\"Creating initial data\", debug=self.debug):\n\n for i in range(NUM_COUNTRIES):\n country, provinces, pops = create_country(self, self.map)\n country.determine_tax_policy()\n self.countries.append(country)", "def testWeatherFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'weather',\n orderBy = [timeCol],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')", "def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()", "def create_all_tables(self):\n pass", "def populate(table_name, date):\n\tlog_msg3(\"Populando \" + table_name)\n\n\twsq_to_txt(table_name, date)\n\n\t# si es un nuevo año se crea una nueva tabla\n\tif(is_new_year(table_name) and not new_tables_created):\n\t\tcreate_tables()\n\n\ttxt_to_table(table_name)\n\n\tlog_msg_ok3()", "def get_gis_historical_data():\n logging.info(\"Generating baseline reference and historical weather data.\")\n \n # Initialising function variables\n fake = Faker()\n geolocator = Nominatim()\n config_data = get_config()\n locations = config_data[\"location\"]\n \n # Check if there are no duplicate locations in the config.yaml file.\n if len(locations) != len(set(locations)):\n logging.error(\"Duplicate location found. Please check config.yaml file.\")\n raise ValueError\n \n # Initialise pandas dataframe column name for baseline reference\n # and historical data.\n df_ref = pd.DataFrame(columns=[\"Location\", \"Latitude\"\n ,\"Longitude\", \"Elevation\"\n ,\"Timezone\"])\n df_hist = pd.DataFrame(columns=[\"Location\", \"Date\"\n ,\"Month\", \"Temperature_Min\"\n ,\"Temperature_Max\", \"Humidity\"\n ,\"Pressure\"])\n \n # Generate weather data for each location.\n for idx, loc in enumerate(locations):\n \n logging.info(\"Retrieving geolocation data for {}.\".format(loc))\n \n # Retrieving geolocation data from geopy library.\n loc_data = geolocator.geocode(loc)\n \n logging.info(\"Check if the location {} is valid.\".format(loc))\n if loc_data is None:\n logging.error(\"Invalid location value supplied ({}). 
Please check config.yaml file.\".format(loc))\n raise ValueError\n logging.info(\"The location {} is valid.\".format(loc))\n \n city = get_city(loc)\n lat = loc_data.latitude\n lon = loc_data.longitude\n \n # Retrieving elevation data for the location.\n elev = get_elevation_data(lat, lon)\n \n for month in range(1, 13):\n \n logging.info(\"Retrieving {} weather data for month {}.\".format(loc, month))\n \n for sample in range(config_data[\"gis\"][\"sampling_number\"]):\n \n temp_min = None\n temp_max = None\n humidity = None\n pressure = None\n \n while temp_min is None or temp_max is None or humidity is None or pressure is None:\n \n year = random.randint(config_data[\"gis\"][\"year_start\"], config_data[\"gis\"][\"year_end\"])\n\n _, last_day = calendar.monthrange(year, month)\n\n datetime_start = datetime.datetime(year, month, 1)\n datetime_end = datetime.datetime(year, month, last_day)\n\n date_gen = fake.date_time_between_dates(datetime_start=datetime_start\n ,datetime_end=datetime_end)\n\n forecast = forecastio.load_forecast(config_data[\"forecastio_api_key\"]\n ,lat\n ,lon\n ,time=date_gen\n ,units=\"si\")\n\n historical_data = forecast.json[\"daily\"][\"data\"][0]\n \n timezone = forecast.json.get(\"timezone\", None)\n temp_min = historical_data.get(\"temperatureMin\", None)\n temp_max = historical_data.get(\"temperatureMax\", None)\n humidity = historical_data.get(\"humidity\", None) * 100\n pressure = historical_data.get(\"pressure\", None)\n \n df_temp_hist = pd.Series(dict(zip(df_hist.columns\n ,[city, date_gen\n ,date_gen.month, temp_min\n ,temp_max, humidity\n ,pressure])))\n \n df_hist = df_hist.append(df_temp_hist, ignore_index=True)\n \n df_temp_ref = pd.Series(dict(zip(df_ref.columns\n ,[city, lat\n ,lon, elev\n ,timezone])))\n df_ref = df_ref.append(df_temp_ref, ignore_index=True)\n \n logging.info(\"Generating position to consolidate latitude, longitude and elevation data\")\n df_pos = df_ref[[\"Latitude\", \"Longitude\", \"Elevation\"]].round(2)\n df_pos[\"Elevation\"] = df_pos[\"Elevation\"].astype(int) \n df_ref[\"Position\"] = df_pos.astype(str).apply(lambda x: \",\".join(x), axis=1)\n \n logging.info(\"Saving baseline reference data.\")\n df_ref.to_csv(get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n ,file_name=config_data[\"gis\"][\"output_base_reference_file_name\"])\n ,index=False)\n logging.info(\"Completed saving baseline reference data.\")\n\n logging.info(\"Saving baseline historical data.\")\n df_hist.to_csv(get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n ,file_name=config_data[\"gis\"][\"output_base_historical_file_name\"])\n ,index=False)\n logging.info(\"Completed saving baseline historical data.\")", "def setupStockTable(self):\n # Get the date\n # NOTE: This is probably un\n date = datetime.date()\n dateStr = date.month() + \"/\" + date.day() + \"/\" + date.year()\n\n stocks = (\"INTC\", \"AAPL\", \"GOOG\", \"YHOO\", \"SYK\", \"VZ\")\n\n for stock in stocks:\n stockObj = self.securityFactory(stock)\n stockObj.queryAPI()\n\n self.stockDB.query(\"INSERT INTO basic_info (ticker, price, daily_change, company, year_high, year_low, \\\n daily_percent, date, streak) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\", (stockObj.target, stockObj.curr, \\\n stockObj.daily_change, stockObj.company,\\\n stockObj.year_high, stockObj.year_low,\\\n stockObj.daily_percent, dateStr, 0))", "def fill_target_table(new_data, curs, conn, overwrite=False):\n for i in new_data:\n 
connect_database.add_target_to_database(list(i), curs, conn, overwrite_exsiting = overwrite)\n conn.commit()", "def create_forecast_dataset(self):\n pass", "def fill_dataset(self):\n rm, rstd = self.get_rolling_stats()\n\n self.add_rolling_mean(rm)\n self.add_bollinger_bands(rstd)\n self.add_spy_info()\n self.add_beta_and_sharpe()\n self.add_stlouis_data()", "def _update_on_refresh():\n cities = City.query.all()\n\n #Iterates over all cities in the database and updates their value\n for city in cities:\n metric_resp, imperial_resp = _get_open_weather_requests(city.name)\n\n metric_json = metric_resp.json()\n imperial_json = imperial_resp.json()\n\n city.temp_celsius = int(metric_json[MAIN][TEMPERATURE])\n city.temp_fahrenheit = int(imperial_json[MAIN][TEMPERATURE])\n db.session.commit()", "def generate_polynesian_weather_data():\n weather_path = os.path.dirname(os.path.realpath(__file__))\n low_fp = weather_path + \"/polynesia_weather/low/1976/\"\n med_fp = weather_path + \"/polynesia_weather/med/1985/\"\n high_fp = weather_path + \"/polynesia_weather/high/1982/\"\n low_name = \"polynesia_1976\"\n med_name = \"polynesia_1985\"\n high_name = \"polynesia_1982\"\n generate_year_weather_data(low_fp, low_name)\n generate_year_weather_data(med_fp, med_name)\n generate_year_weather_data(high_fp, high_name)", "def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])", "def run(self):\n\n for table in self.TABLES:\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}_Work\")\n self.dictionary_conn.commit()\n self.logger.info(\"work tables cleared\")\n for id in self.ids:\n drug = self.Drug(self, id)\n if drug.wanted:\n drug.load()\n self.logger.info(\"work tables populated\")\n for table in self.TABLES:\n insert = f\"INSERT INTO {table} SELECT * FROM {table}_Work\"\n self.dictionary_cursor.execute(f\"TRUNCATE TABLE {table}\")\n self.dictionary_cursor.execute(insert)\n self.dictionary_conn.commit()\n self.logger.info(\"live tables ready\")", "def set_up_tables(self):\n tables = []\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_info',\n 'description': desc.SimInfoRow,\n 'tabletitle': 'Simulation Information'})\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_timeseries',\n 'description': desc.SimTimeseriesRow,\n 'tabletitle': 'Simulation Power Data'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_params',\n 'description': desc.ThMetadataRow,\n 'tabletitle': 'TH Component Parameters'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_timeseries',\n 'description': desc.ThTimeseriesRow,\n 'tabletitle': 'TH Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_timeseries',\n 'description': desc.NeutronicsTimeseriesRow,\n 'tabletitle': 'Neutronics Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_params',\n 'description': desc.NeutronicsParamsRow,\n 'tabletitle': 'Neutronics Metadata'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'zetas',\n 'description': desc.ZetasTimestepRow,\n 'tabletitle': 'Neutron Precursor Concentrations'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'omegas',\n 'description': desc.OmegasTimestepRow,\n 'tabletitle': 'Decay Heat Fractions'})\n return tables", "def populate_db(self):\n # Get donors\n log.info(\"Populating donors.\")\n\n self.r.hmset('Thomas', {'donations': '500', 'email': '[email protected]', 'city': 
'Athens', 'state': 'GA', 'zip': 30606})\n\n self.r.hmset('Ted', {'donations': '1', 'email': '[email protected]', 'city': 'Memphis', 'state': 'TN', 'zip': 38104})\n\n self.r.hmset(\"Bailey\", {'donations': '1000', 'email': '[email protected]', 'city': 'Washington', 'state': 'DC', 'zip': 12345})", "def weather_data(cities, openweathermap_api_key=openweathermap_api_key):\n L = []\n for c in cities:\n res = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q={c}&appid={openweathermap_api_key}&units=imperial')\n L.append(res.json())\n\n df = pd.DataFrame(L)\n df['lon'] = df['coord'].map(op.itemgetter('lon'))\n df['lat'] = df['coord'].map(op.itemgetter('lat'))\n df['Temprature'] = df['main'].map(op.itemgetter('temp'))\n df['Humidity'] = df['main'].map(op.itemgetter('humidity'))\n df['Wind Speed'] = df['wind'].map(op.itemgetter('speed'))\n return df[['name','lon', 'lat','Temprature','Humidity','Wind Speed']]", "def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()", "def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):\n city_dictionary = getCities(cities_file)\n actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())\n #For each day and for each city, get all the data and put it into a spreadsheet.", "def setupTempTables(self):\n cur = self.cursor()\n cur.execute(\"PRAGMA temp_store = MEMORY\")\n for table, columns in self.temp_tables.items():\n cur.execute(\"create temporary table if not exists %s (\" % table + (',').join('\"' + key + '\" ' + val for key, val in columns.items()) + ')')", "def process_table_init(self):\n logging.debug(\"Processing table initialization, %d entries\",\n len(self.table_initialization))\n\n for init_entry in self.table_initialization:\n for table_name, entry_desc in init_entry.items():\n self.air_table[table_name].add_entry(\n table_entry.description_to_entry(entry_desc))", "def get_basic_data(self):\n\n db = DataBase().clear_table()\n\n data = self.scraper.scrape_top_250()\n for d in data:\n title = d.find(\"td\", class_=\"titleColumn\")\n title = title.find(\"a\")\n title = re.sub(\"<.*?>\", \"\", str(title))\n\n film_id = d.find(\"td\", class_=\"watchlistColumn\")\n film_id = film_id.find(\"div\")\n film_id = film_id[\"data-tconst\"]\n\n year = d.find(\"span\", class_=\"secondaryInfo\")\n year = re.sub(\"<.*?>\", \"\", str(year)).replace(\"(\", \"\").replace(\")\", \"\")\n\n director = d.find(\"td\", class_=\"titleColumn\")\n director = director.find(\"a\")\n director = director[\"title\"]\n director, *cast = director.split(\", \")\n director = director.replace(\" (dir.)\", \"\")\n\n rating = d.find(\"td\", class_=\"ratingColumn imdbRating\")\n rating = rating.find(\"strong\")\n rating = re.sub(\"<.*?>\", \"\", str(rating))\n\n poster = d.find(\"td\", class_=\"posterColumn\")\n poster = poster.find(\"img\")[\"src\"]\n poster = re.sub(\"@.+\", \"@._V1_FMjpg_UY474_.jpg\", poster)\n\n DataBase().populate_table(\n (title, film_id, year, director, \", \".join(cast), rating, poster)\n )", "def update_data():\n etf_prices = get_prices(start=START_DATE, end=END_DATE)\n etf_returns = compute_returns(etf_prices)\n merged_etf_data = etf_prices.merge(etf_returns, right_index=True, left_index=True)\n indicators = compute_indicators(merged_etf_data) # this uses the \"ta\" lib, but it does not need\n # to be imported\n merged_etf_data = merged_etf_data.merge(indicators, right_index=True, 
left_index=True)\n vix_data = get_vix()\n data = merged_etf_data.merge(vix_data, right_index=True, left_index=True)\n data.to_csv('Data/database.csv')\n return", "def process_weather(forecast_file):\n with open(forecast_file) as json_file:\n json_data = json.load(json_file)\n\n min_temp_store = {}\n max_temp_store = {}\n weather_results = str()\n header_results = str()\n\n for day_in_forecast in json_data['DailyForecasts']:\n day_date = day_in_forecast['Date']\n min_temp = day_in_forecast['Temperature']['Minimum'][\"Value\"]\n min_temp_c = convert_f_to_c(min_temp)\n min_temp_store[day_date] = min_temp_c\n max_temp = day_in_forecast['Temperature']['Maximum'][\"Value\"]\n max_temp_c = convert_f_to_c(max_temp)\n max_temp_store[day_date] = max_temp_c\n\n day_time_phrase = day_in_forecast['Day']['LongPhrase']\n rain_chance_day = day_in_forecast['Day']['RainProbability']\n night_time_phrase = day_in_forecast['Night']['LongPhrase']\n rain_chance_night = day_in_forecast['Night']['RainProbability']\n weather_results = weather_results + (f\"-------- {convert_date(day_date)} --------\\nMinimum Temperature: {format_temperature(round(min_temp_c,1))}\\nMaximum Temperature: {format_temperature(round(max_temp_c,1))}\\nDaytime: {day_time_phrase}\\n Chance of rain: {rain_chance_day}%\\nNighttime: {night_time_phrase}\\n Chance of rain: {rain_chance_night}%\\n\")+ \"\\n\"\n\n\n max_day = max(max_temp_store, key=max_temp_store.get)\n max_value = max_temp_store[max_day]\n min_day = min(min_temp_store, key=min_temp_store.get)\n min_value = min_temp_store[min_day]\n max_totals = (sum(max_temp_store.values()))\n min_totals = (sum(min_temp_store.values()))\n num_items = len(min_temp_store)\n mean_min = round(calculate_mean(min_totals,num_items),1)\n mean_max = round(calculate_mean(max_totals,num_items),1)\n\n save_header = (f\"{len(json_data['DailyForecasts'])} Day Overview\\n The lowest temperature will be {format_temperature(round((min_value),1))}, and will occur on {convert_date(min_day)}.\\n The highest temperature will be {format_temperature(round((max_value),1))}, and will occur on {convert_date(max_day)}.\\n The average low this week is {format_temperature(mean_min)}.\\n The average high this week is {format_temperature(mean_max)}.\\n\")\n\n header_results = save_header + \"\\n\"+ weather_results\n \n return(header_results)", "def _populate_zone_facts_table(self):\n census_fields = [\n 'poverty_rate', 'fraction_black', 'income_per_capita',\n 'labor_participation', 'fraction_foreign',\n 'fraction_single_mothers', 'acs_lower_rent_quartile',\n 'acs_median_rent', 'acs_upper_rent_quartile'\n ]\n\n zone_types = ['ward', 'neighborhood_cluster', 'census_tract']\n\n query_results = list()\n\n # populate columns accordingly for each zone_specific type\n for zone_type in zone_types:\n field_values = dict()\n\n # get field value for each zone_specific type\n for field in census_fields:\n result = self._census_with_weighting(data_id=field,\n grouping=zone_type)\n field_values[field] = result['items']\n\n zone_specifics = self._get_zone_specifics_for_zone_type(zone_type)\n\n # TODO: add aggregate for each zone_type into table\n for zone in zone_specifics:\n # get not None values so we can added to db\n columns = list()\n values = list()\n for field in census_fields:\n zone_value = field_values[field][zone]\n\n if zone_value is not None:\n columns.append(field)\n values.append(\"'\" + str(zone_value) + \"'\")\n\n # derive column and values strings needed for sql query\n columns = ', '.join(columns)\n columns = 'zone, ' + 
columns\n\n values = ', '.join(values)\n values = \"'\" + zone + \"', \" + values\n\n q = \"INSERT INTO zone_facts ({cols}) VALUES ({vals})\".format(\n cols=columns, vals=values)\n\n with self.engine.connect() as conn:\n result = conn.execute(q)\n query_results.append(result)\n\n return query_results", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()", "def create_table(self):\n c = self.conn.cursor()\n c.execute(\"CREATE TABLE sensor_data (mac text, name text, temperature real, light integer, moisture real, conductivity real, battery real, ts_utc int, date_iso text, firmware text )\")", "def populate_locations(connection):\n print('Populating locations...')\n cursor = connection.cursor()\n with open(get_data_path('locations', 'locations.json'), 'r', encoding='utf-8') as json_file:\n locations = json.load(json_file)\n\n for station_id, location in locations.items():\n cursor.execute(f'''SELECT id \n FROM watercourse_stations \n WHERE id = {station_id}''')\n\n if len(cursor.fetchall()):\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n cursor.execute(f'''UPDATE watercourse_stations\n SET location_id = {cursor.lastrowid}\n WHERE id = {station_id}''')", "def make_weather_features(self, timeline_dt_list):\n\n print \"Making weather features...\"\n\n N_FEATURES = 2\n n_examples = len(timeline_dt_list)\n XX = numpy.zeros((n_examples, N_FEATURES))\n indices = numpy.zeros(n_examples,dtype='int')\n ind_weatherday = 0\n\n # Loop over all times in the timeline\n for ii, time in enumerate(timeline_dt_list):\n # Find where this time in the timeline matches the date\n # of some weather data.\n jj = ind_weatherday\n while time.date() != self.datetimes[jj].date():\n # Make sure jj does not get too large to be an index to\n # the list.\n # Note this is probably a bad idea to do it this way.\n if jj == len(self.datetimes)-1:\n break\n jj += 1\n## print jj\n\n ind_weatherday = jj\n indices[ii] = ind_weatherday\n\n# XX[ii, 0] = self.table['PrecipIn'][ind_weatherday]\n# XX[ii, 1] = self.table['Mean TemperatureF'][ind_weatherday]\n## XX[ii, 2] = self.table['MeanDew PointF'][ind_weatherday]\n\n XX[:,0] = self.table['PrecipIn'][indices]\n XX[:,1] = self.table['Mean TemperatureF'][indices]\n self.weather_features = XX\n return XX", "def insert_entities_staging(self):\n\n for year in range(1900, CURRENT_YEAR, YEARS_RANGE):\n self.load_wikidata(\"movies\", MOVIES_BY_YEAR_SPARQL_QUERY, INSERT_MOVIE_SQL_QUERY, INSERT_MOVIE_MAP_COLUMNS, year, YEARS_RANGE)\n\n self.load_wikidata(\"songs\", SONGS_BY_YEAR_SPARQL_QUERY, INSERT_SONG_SQL_QUERY, INSERT_SONG_MAP_COLUMNS, year, YEARS_RANGE, True)\n self.load_wikidata(\"tvshows\", TVSHOWS_SPARQL_QUERY, INSERT_TVSHOW_SQL_QUERY, INSERT_TVSHOW_MAP_COLUMNS)\n self.load_wikidata(\"animatedmovies\", ANIMATEDMOVIES_SPARQL_QUERY, 
INSERT_ANIMATEDMOVIE_SQL_QUERY,\n INSERT_ANIMATEDMOVIE_MAP_COLUMNS)\n self.load_wikidata(\"videogames\", VIDEOGAMES_SPARQL_QUERY, INSERT_VIDEOGAME_SQL_QUERY, INSERT_VIDEOGAME_MAP_COLUMNS)\n self.load_wikidata(\"books\", BOOKS_SPARQL_QUERY, INSERT_BOOK_SQL_QUERY, INSERT_BOOK_MAP_COLUMNS)", "def populate_dyn(self, table):\n myrow = table.row\n myrow[\"sample_time\"] = int(time.time() - glob.base_time)\n myrow[\"available_bike_stands\"] = self.available_bike_stands\n myrow[\"available_bikes\"] = self.available_bikes\n myrow[\"last_update\"] = self.last_update\n myrow[\"status\"] = self.status\n myrow.append()\n table.flush()", "def to_WTH_converter(self, weather_data, dest_dir):\n ds_all = weather_data.get_global_dataset()\n if self.country is None:\n print(\"Country given is erroneous:\")\n return\n elif self.country == \"globe\":\n lon_num_start = 0\n lon_num_stop = weather_data.get_num_of_attribute('longitude')\n lat_num_start = 0\n lat_num_stop = weather_data.get_num_of_attribute('latitude')\n else:\n lon_num_start, lon_num_stop, lat_num_start, lat_num_stop = weather_data.get_country_boundary(self.country)\n\n\n # top bottom, left to right\n lon_num_start = 397\n lat_num_start = 184\n for lon_i in range(lon_num_start, lon_num_stop + 1):\n # for lon_i in range(lon_num_start, lon_num_stop+1):\n lon = ds_all.longitude.isel(longitude=lon_i).values.tolist()\n\n for lat_i in range(lat_num_start, lat_num_stop+1):\n # for lat_i in range(lat_num_start, lat_num_stop + 1):\n lat = ds_all.latitude.isel(latitude=lat_i).values.tolist()\n\n # create a dynamic header with updated LON, LAT info and move it into the folder given\n wth_header_u = ut.format_header(lat_i + 1, lon_i + 1, lat, lon)\n wth_header = dest_dir + \"/\" + wth_header_u\n shutil.move(wth_header_u, wth_header)\n\n # open in appending mode\n fwth = open(wth_header, \"a+\")\n\n # loop through daily weather data\n for t, date in enumerate(self.years):\n daily_data_vars = ut.get_daily_data_vars(ds_all, lat_i, lon_i, t)\n # disregard all NAN values\n if daily_data_vars is None:\n fwth.close()\n os.remove(wth_header)\n break\n\n if t == 0:\n ut.update_table(wth_header_u, lat, lon)\n\n entry = ut.format_data_vars_entry(daily_data_vars, date)\n\n # append this entry into the file\n fwth.write(entry)\n print(\"Added entry:\", entry)\n\n # close file after writing\n fwth.close()\n print(\"Output WTH:\", wth_header)", "def setUpClass(cls):\n cls.w = pd.read_csv(_df.WEATHER_TWO_WEEK, index_col='time',\n parse_dates=True)", "def _perform_data_conversion(self):\n self.data = []\n items = 0\n for value in self.elements_to_convert:\n try:\n location = parse_int(value.get('location_id'), nullable=False)\n if not value.get('list', []):\n continue\n for obs in value['list']:\n items += 1\n # Setting timezone to pytz.UTC FIXES [BUG-039].\n timestamp = parse_date_utc(obs.get('dt') * 1000)\n date = timestamp.date()\n time = timestamp.time()\n temperature = parse_int(obs['main'].get('temp'))\n pressure = parse_float(obs['main'].get('pressure'))\n humidity = parse_int(obs['main'].get('humidity'))\n wind_speed = parse_int(obs.get('wind', {}).get('speed'))\n wind_degrees = parse_int(obs.get('wind', {}).get('deg'))\n wind_direction = compute_wind_direction(wind_degrees)\n weather = obs.get('weather', [{}])[0]\n if weather.get('icon') and weather.get('id'):\n weather = - parse_int(weather.get('id'), nullable=False) if 'n' in weather['icon'] else \\\n parse_int(weather.get('id'), nullable=False)\n self.data.append(WeatherForecastObservation(location_id=location, 
date=date, time=time,\n temperature=temperature, pressure=pressure, humidity=humidity, wind_speed=wind_speed,\n wind_degrees=wind_degrees, wind_direction=wind_direction, weather_id=weather))\n except (ValueError, AttributeError, KeyError, IndexError, TypeError):\n _id = value.get('_id', 'Unknown ID')\n self.logger.exception('An error occurred while parsing data. WeatherForecastObservation with ID \"%s\" '\n 'will not be converted.' % _id)\n self.state['elements_to_convert'] = items", "def fill_table_for_ETF(conn):\n baseurl = f\"https://financialmodelingprep.com/api/v3/etf/list\"\n params = {\"apikey\": FMP_API_KEY}\n etfs = make_request(baseurl=baseurl, params=params)\n for etf in etfs:\n insert_etf(conn, etf[\"symbol\"], etf[\"name\"], etf[\"exchange\"])", "def initial_db_setup() -> None:\n db_filename = \"twdft.db\"\n db_path = os.path.join(TWDFT_DATA_DIR, db_filename)\n csv_filename = \"sites.csv\"\n csv_path = os.path.join(TWDFT_DATA_DIR, csv_filename)\n db_is_new = not os.path.exists(db_path)\n sites_csv = os.path.join(TWDFT_DATA_DIR, csv_filename)\n\n if db_is_new:\n with sqlite3.connect(db_path) as conn:\n c = conn.cursor()\n\n # first we create a site object\n c.execute(\n \"\"\"\n CREATE TABLE site(\n id INTEGER PRIMARY KEY,\n name TEXT,\n site_type TEXT,\n sub_category TEXT,\n address_1 TEXT,\n address_2 TEXT,\n town TEXT,\n county TEXT,\n country TEXT,\n postcode TEXT,\n site_category TEXT,\n freq_target TEXT,\n created TEXT,\n notes TEXT,\n last_inspection TEXT,\n next_inspection TEXT,\n pfsp_approval TEXT,\n pfsp_expiry TEXT,\n unlocode TEXT,\n pfso TEXT,\n pso TEXT,\n pfsa_approval TEXT,\n pfsa_expiry TEXT,\n team TEXT,\n created_by TEXT,\n last_updated TEXT,\n updated_by TEXT,\n afp_loc TEXT,\n rdf TEXT,\n classification TEXT,\n article24 TEXT,\n psa_approval TEXT,\n inspection_due TEXT\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspection table\n\n c.execute(\n \"\"\"\n CREATE TABLE inspection(\n id INTEGER PRIMARY KEY,\n site INTEGER,\n date TEXT,\n status TEXT,\n time TEXT,\n FOREIGN KEY(site) REFERENCES site(id)\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspector table\n c.execute(\n \"\"\"\n create table inspector(\n id integer primary key,\n first_name text,\n last_name text\n )\n \"\"\"\n )\n conn.commit()\n\n for i in INSPECTORS:\n first = i.split(\" \")[0]\n last = i.split(\" \")[1]\n c.execute(\n \"INSERT INTO inspector(first_name, last_name) VALUES (?,?)\",\n (first, last),\n )\n\n # a table that links inspectors with inspections\n c.execute(\n \"\"\"\n CREATE TABLE inspector_inspections(\n inspector INTEGER,\n inspection INTEGER,\n FOREIGN KEY (inspector) REFERENCES inspector(id),\n FOREIGN KEY (inspection) REFERENCES inspection(id)\n )\n \"\"\"\n )\n conn.commit()\n\n for site in map(Site._make, csv.reader(open(csv_path, \"r\"))):\n try:\n c.execute(\n f\"\"\"\n INSERT INTO site VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",\n (\n int(site.id.replace(\",\", \"\")),\n site.name,\n site.site_type,\n site.sub_category,\n site.address_1,\n site.address_2,\n site.town,\n site.county,\n site.country,\n site.postcode,\n site.site_category,\n site.freq_target,\n site.created,\n site.notes,\n site.last_inspection,\n site.next_inspection,\n site.pfsp_approval,\n site.pfsp_expiry,\n site.unlocode,\n site.pfso,\n site.pso,\n site.pfsa_approval,\n site.pfsa_expiry,\n site.team,\n site.created_by,\n site.last_updated,\n site.updated_by,\n site.afp_loc,\n site.rdf,\n site.classification,\n site.article24,\n 
site.psa_approval,\n site.inspection_due,\n ),\n )\n except sqlite3.IntegrityError as e:\n print(\"That hasnae worked\", site.inspection_due)", "def prepare_train_data(self):\r\n ## Impute rlkpis\r\n print(\"Imputing rlKPI df\")\r\n self.rlkpi.add_target_labels(1)\r\n self.rlkpi.impute_rl_kpis()\r\n\r\n print(\"Add 'met-real-station_no' & met-forecast-station_no to rl_kpis_df\")\r\n self.add_met_real_forecast_station_col_to_rlkpis()\r\n print(\"Merge 'met-real-sampled df to rl kps \")\r\n self.merge_met_real_sampled_df_to_rlkpis()\r\n\r\n ## Imputations for met-forecast\r\n print(\"Impute met-forecast\")\r\n met_forecast_obj = self.metfcast\r\n met_forecast_obj.impute_met_forecast()\r\n\r\n #Merge met forecast data to earlier merged data\r\n print(\"Merge Train data with imputed forecast df\")\r\n self.train_data = pd.merge(self.train_data,\r\n met_forecast_obj.imputed_forecast_df,\r\n on=['datetime-station_no'], indicator=True, how='inner')\r\n print(\"Check any imputation needed\", self.train_data.isna().sum().sum())\r\n self.train_data.drop(['_merge'], axis=1, inplace=True)\r\n self.perform_data_under_sampling(self.train_data)", "def process_weather(forecast_file):\n # Load json data file\n \n with open(forecast_file) as json_file:\n json_data = json.load(json_file)\n \n # Set Variables, Dictionaries and Lists\n days_list = []\n temp_dict = {}\n daily_dict = {}\n\n num_items = 0\n total_sum_min = 0\n total_sum_max = 0\n days = len(json_data['DailyForecasts'])\n days_list = days_in_data(days)\n\n t_temp_min = 100\n t_temp_max = 0\n\n # Pull through the data\n\n for day in days_list:\n num_items += 1\n date = convert_date(json_data['DailyForecasts'][day]['Date'])\n min_temp = convert_f_to_c(json_data['DailyForecasts'][day]['Temperature']['Minimum']['Value'])\n total_sum_min += min_temp\n max_temp = convert_f_to_c(json_data['DailyForecasts'][day]['Temperature']['Maximum']['Value'])\n total_sum_max += max_temp\n day_desc = json_data['DailyForecasts'][day]['Day']['LongPhrase']\n chance_rain_day = json_data['DailyForecasts'][day]['Day']['RainProbability']\n night_desc = json_data['DailyForecasts'][day]['Night']['LongPhrase']\n chance_rain_night = json_data['DailyForecasts'][day]['Night']['RainProbability']\n \n if min_temp < t_temp_min:\n t_temp_min = min_temp\n t_temp_mindate = date\n else:\n pass\n if max_temp > t_temp_max:\n t_temp_max = max_temp\n t_temp_maxdate = date\n else:\n pass\n \n daily_dict[day] = [date, min_temp, max_temp, day_desc, chance_rain_day, night_desc, chance_rain_night]\n # 0 1 2 3 4 5 6 \n \n # print(temp_dict)\n # print(daily_dict)\n\n # Calculate Minimum, Maximum and Mean temperatures\n\n mean_min = format_temperature(calculate_mean(total_sum_min, num_items))\n # print(mean_min)\n mean_max = format_temperature(calculate_mean(total_sum_max, num_items))\n # print(mean_max)\n\n # Format Minimum and Maximum temperatures\n min_temp_format = format_temperature(t_temp_min)\n max_temp_format = format_temperature(t_temp_max)\n\n ##############################################################################################\n\n # Combine string messages to return to user\n\n str_Output = \"\"\n Output_gen1 = (f\"{num_items} Day Overview\\n\")\n Output_gen2 = (f\" The lowest temperature will be {min_temp_format}, and will occur on {t_temp_mindate}.\\n\")\n Output_gen3 = (f\" The highest temperature will be {max_temp_format}, and will occur on {t_temp_maxdate}.\\n\")\n Output_gen4 = (f\" The average low this week is {mean_min}.\\n\")\n Output_gen5 = (f\" The average high this 
week is {mean_max}.\\n\")\n str_Output = Output_gen1 + Output_gen2 + Output_gen3 + Output_gen4 + Output_gen5\n for key, value in daily_dict.items():\n Output_daily0 = (\"\\n\")\n Output_daily1 = (f\"-------- {value[0]} --------\\n\")\n Output_daily2 = (f\"Minimum Temperature: {format_temperature(value[1])}\\n\")\n Output_daily3 = (f\"Maximum Temperature: {format_temperature(value[2])}\\n\")\n Output_daily4 = (f\"Daytime: {value[3]}\\n\")\n Output_daily5 = (f\" Chance of rain: {value[4]}%\\n\")\n Output_daily6 = (f\"Nighttime: {value[5]}\\n\")\n Output_daily7 = (f\" Chance of rain: {value[6]}%\\n\")\n str_Output = str_Output + Output_daily0 + Output_daily1 + Output_daily2 + Output_daily3 + Output_daily4 + Output_daily5 + Output_daily6 + Output_daily7\n str_Output = str_Output +\"\\n\"\n\n return str_Output", "def generate_database_from_metadatas(metadata_dict, stimulus_dict):\n # Create empty database from template class\n indexes = sorted(metadata_dict.keys())\n database = pd.DataFrame(index=indexes, columns=['Number', 'Metadata', 'Tracking', 'Registration', 'Stimuli'])\n\n # Fill in metadata from the dictionary\n for sessname, metadata in sorted(metadata_dict.items()):\n database['Metadata'][sessname] = metadata\n database['Number'][sessname] = metadata['number']\n\n for sessname, stimulus in sorted(stimulus_dict.items()):\n database['Stimuli'][sessname] = stimulus\n\n print(colored('Database initialized.','yellow'))\n return database", "def load_dwh_tables(self):\n print(\"Loading the creative works table\")\n self.cur.execute(dwh_queries.INSERT_CREATIVE_WORKS_SQL_QUERY)\n self.conn.commit()\n\n print(\"Loading the participations table\")\n\n self.cur.execute(dwh_queries.INSERT_PARTICIPATIONS_SQL_QUERY)\n self.conn.commit()", "def populate_table(self, data):\n\n db = self.connection(database=\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"\"\"\n INSERT INTO film (title, film_id, year, director, cast, rating, poster_url) \n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n cur.execute(sql, data)\n db.commit()\n except:\n print(\"An error occurred when saving the data!\")\n\n db.close()", "def testWeatherAggregation(self):\n\n rowCnt = 0\n agg = self.aggregator.aggregatedData(dataType = 'weather',\n aggregationType = 'agg_weather',\n timeColumnName = 'timestamp',\n subkeyColumnName = None,\n startDate = self.testStart,\n endDate = self.testEnd)\n for row in agg.data:\n print '%d: %s' % (rowCnt, row)\n rowCnt += 1\n self.assertEqual(rowCnt, 1, 'Row count not correct.')\n\n self.logger.log('agg cols: %d' % len(agg.columns))\n self.assertEqual(len(agg.columns), 3, 'Weather columns not equal to 3.')\n self.aggregator.insertAggregatedData(agg = agg)", "def _get_information(self):\n weather_dict = {}\n table_body = self.climate_table\n\n rows = table_body.find_all('tr')\n months = [col.get_text() for col in rows[0].find_all('td')[1:]]\n\n for row in rows[1:]:\n cols = row.find_all('td')\n key = cols[0].get_text()\n value_getter = self._value_getters_by_key.get(key, self._get_remote_workers)\n\n weather_dict.update({key: [(months[i],) + value_getter(col) for i, col in enumerate(cols[1:])]})\n\n return weather_dict", "def fill_obs(self, observation_table, data_store):\n for obs in observation_table:\n events = data_store.obs(obs_id=obs['OBS_ID']).events\n\n # TODO: filter out (mask) possible sources in the data\n # for now, the observation table should not contain any\n # run at or near an existing source\n\n self.counts_cube.fill_events([events])\n self.livetime_cube.data += 
events.observation_live_time_duration", "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def weather_script(\n init_data_path: str,\n output_path: str,\n workers: int,\n weatherAPI_rpm: int,\n geoAPI_rpm: int,\n) -> None:\n unzip(init_data_path, output_path)\n top_hotels_dataframe_without_addresses = primary_data_proc(output_path)\n geocoder = geocoder_setup(geoAPI_rpm)\n top_hotels_df_with_addresses = define_address(\n top_hotels_dataframe_without_addresses,\n workers,\n geocoder,\n )\n cities, countries, coordinates = city_center_coord(top_hotels_df_with_addresses)\n weather_df = pd.concat(\n [\n prev_weather(cities, countries, coordinates, workers, weatherAPI_rpm),\n forecast_weather(cities, countries, coordinates, workers, weatherAPI_rpm),\n ]\n )\n\n logger.info(\"Start to save results\")\n save_main_info(output_path, weather_df, top_hotels_df_with_addresses)\n logger.info(\"Finish\")", "def get_usda_food_data (connection):\n\n tables = ['usda_food_access_feb2014', 'usda_food_assistance_feb2014',\n 'usda_food_health_feb2014', 'usda_food_insecurity_feb2014',\n 'usda_food_stores_feb2014']\n\n for table in tables:\n if table == tables[0]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"PCT_LACCESS_POP10\" AS \"low_access_food_pct10\",\n\"PCT_LACCESS_LOWI10\" AS \"low_access_food_low_inc_pct10\",\n\"PCT_LACCESS_SENIORS10\" AS \"low_access_food_snr_pct10\",\n\"PCT_LACCESS_HHNV10\" AS \"low_access_food_no_car_pct10\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[1]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"REDEMP_SNAPS12\" AS \"snap_redemp_per_store_2012\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[2]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"PCT_DIABETES_ADULTS10\" AS \"pct_diabetes_adults_2010\",\n\"PCT_OBESE_ADULTS13\" AS \"pct_obese_adults_2013\",\n\"RECFACPTH12\" AS \"rec_fac_2012\",\n\"NATAMEN\" AS \"ers_nat_amenity_index_1999\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[3]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"FOODINSEC_10_12\" AS \"food_insec_house_pct_10_12\",\n\"VLFOODSEC_10_12\" AS \"very_low_food_insec_house_pct_10_12\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[4]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"GROCPTH12\" AS \"grocery_pct10\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n\n if table == tables[0]:\n data = pd.read_sql_query(sql_query, con)\n data.where ((pd.notnull (data)), other=np.nan, inplace=True)\n data = data.dropna (subset=['FIPS'])\n data['FIPS'] = data['FIPS'].apply (lambda x: str(x).zfill (5))\n else:\n data_tmp = pd.read_sql_query(sql_query, con)\n data_tmp.where ((pd.notnull (data_tmp)), other=np.nan, inplace=True)\n data_tmp = data_tmp.dropna (subset=['FIPS'])\n data_tmp['FIPS'] = data_tmp['FIPS'].apply (lambda x: str(x).zfill (5))\n data = pd.merge (data, 
data_tmp, on=\"FIPS\", how=\"left\")\n\n return (data)", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def set_training_data(self):\n # Optional training data period\n # TODO: add training data period feature to training data query\n if not self.training_period == None:\n training_period_date = (datetime.datetime.utcnow() - timedelta(minutes=self.training_period)).strftime(\"%Y-%m-%d\")\n print(f\"Training data start date: {training_period_date}\")\n # Extract queried data from Athena\n #athena = athena_connect.Athena()\n #features_df = athena.pandas_read_athena(self.training_data_sql)\n with open('feature_sql.txt', 'w') as f:\n print(self.training_data_sql, file=f) \n features_df = pd.read_sql(self.training_data_sql, self.logic_db_engine())\n features_df.fillna(0, inplace=True)\n print(features_df.shape)\n features_df = features_df[max(self.feature_minutes_list):]\n print(features_df.shape)\n # Remove infinity string\n features_df.replace({'Infinity': 0}, inplace=True)\n # Convert all object fields to numeric except date fields\n object_col_list = features_df.columns[features_df.dtypes.eq('object')]\n object_col_list = [col for col in object_col_list if 'trade_date' not in col]\n features_df[object_col_list] = features_df[object_col_list].apply(pd.to_numeric, errors='coerce')\n self.training_df = features_df", "def fill_table(info):\n # extrac attributes from info struct\n data = info[\"data\"]\n table = info[\"table\"]\n header = info[\"header\"]\n row_num = info[\"row_num\"]\n\n currency_type_num = row_num - 1\n row_index = 0\n col_index = 0\n i = 0\n while i < len(data):\n if data[i].find(\"%\") > 0:\n # stat data\n while i < len(data) and row_index < currency_type_num:\n table[row_index+1].append(data[i])\n row_index += 1\n i += 1\n # Reset row_index\n row_index = 0\n else:\n if i < row_num - 1:\n # currency Type\n table[i+1].append(data[i])\n else:\n # time marker\n if data[i] != header:\n table[0].append(data[i])\n i += 1\n\n # End loop\n return None", "def init():\n database.create_tables([Tracker])\n database.commit()", "def filldf(tfinal):\n getbasics(tfinal)\n # getusermentions(tfinal)\n getretweets(tfinal)\n getinreply(tfinal)\n return tfinal", "def forecast_weather(self):\n pass", "def create_tables(cur, country_json, xml_state, body_json):\n print(\"Creating the 3 first tables...\")\n cur.execute('CREATE TABLE IF NOT EXISTS country_purchases(idx INTEGER PRIMARY KEY, state TEXT, amount INTEGER)')\n with open(\"country_purchases.csv\", 'r') as f:\n for idx, line in enumerate(f.read().split('\\n')):\n line_splt = line.split(',')\n try:\n cur.execute('INSERT INTO country_purchases VALUES(\"%s\", \"%s\", \"%s\")' % (idx, line_splt[0], line_splt[1]))\n except IndexError:\n pass\n cur.execute('CREATE TABLE IF NOT EXISTS country_total_purchases(idx INTEGER PRIMARY KEY, state TEXT, amount INTEGER)')\n with open(\"country_total_purchases.csv\", 'r') as f:\n for 
idx, line in enumerate(f.read().split('\\n')):\n line_splt = line.split(',')\n try:\n cur.execute('INSERT INTO country_total_purchases VALUES(\"%s\", \"%s\", \"%s\")' % (idx, line_splt[0], line_splt[1]))\n except IndexError:\n pass\n cur.execute('CREATE TABLE IF NOT EXISTS country_albums(ID INTEGER PRIMARY KEY, state TEXT, year INTEGER, genre TEXT, album TEXT, amount INTEGER)')\n for idx, album in enumerate(country_json[body_json['state']]):\n cur.execute('INSERT INTO country_albums VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")' %\n (idx, body_json['state'], body_json['year'], body_json['genre'], album,\n xml_state[0][0][idx].text))", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def get_weather(start: str, end: str) -> Generator[Dict[str, str], None, None]:\n speeds = get_speeds(start, end)\n for data in get_temperatures(start, end):\n data.update(next(speeds))\n yield data", "def create_final_table(conn, county):\r\n for county in county:\r\n query = f\"SELECT name FROM sqlite_master WHERE type ='table' AND name NOT LIKE 'sqlite_%' AND name = '{county}'\"\r\n result = execute_query(conn, query)\r\n try:\r\n if len(result) == 0:\r\n query = f\"create table {county} as select * from {county}_stg;\"\r\n execute_query(conn, query)\r\n\r\n load_final_table(conn, county)\r\n except Exception as e:\r\n print(f\"This query {query} failed with exception {e}\")", "def load_data(city, month, day):\n\n if city == 'all':\n df = pd.read_csv(CITY_DATA['chicago'])\n df['city'] = 'chicago'\n ny = pd.read_csv(CITY_DATA['new york city'])\n ny['city'] = 'new york city'\n df = df.append(ny,sort = True)\n wa = pd.read_csv(CITY_DATA['washington'])\n wa['city'] = 'washington'\n df = df.append(wa,sort = True)\n else:\n df = pd.read_csv(CITY_DATA[city])\n df['city'] = CITY_DATA[city]\n\n df['Start Time'] = pd.to_datetime(df['Start Time']) #converts Start Time to datetime\n df['End Time'] = pd.to_datetime(df['End Time']) #converts End Time to datetime\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n #print(df) #DataFrame\n\n #filter by month\n if month != 'all':\n month_name = ['january', 'february', 'march', 'april', 'may', 'june','july','august','september','october','november','december']\n month_num = month_name.index(month) + 1\n df = df[df['month'] == month_num]\n #filter by day\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n recs = df['Start Time'].count()\n\n return df, recs", "def setUp(self):\n table = self.get_local_dynamo_cli().Table(constants.get_configuration_table())\n response = table.scan()\n\n timeseries = [item['timeserie'] for item in response['Items']]\n\n for ts in timeseries:\n table.delete_item(Key={'timeserie': ts})", "def temperature_only_data_prep(observations, 
predictors, for_prediction=False,\n verbose=True):\n predictors = predictors[['doy', 'site_id', 'year', 'temperature']].copy()\n doy_series = predictors.doy.dropna().unique()\n doy_series.sort()\n predictors = predictors.pivot_table(index=['site_id', 'year'], columns='doy', values='temperature').reset_index()\n\n # This first and last day of temperature data can causes NA issues because\n # of leap years.If thats the case try dropping them\n first_doy_has_na = predictors.iloc[:, 2].isna().any() # first day will always be col 2\n if first_doy_has_na:\n first_doy_column = predictors.columns[2]\n predictors.drop(first_doy_column, axis=1, inplace=True)\n doy_series = doy_series[1:]\n warn(\"\"\"Dropped temperature data for doy {d} due to missing data. Most likely from leap year mismatch\"\"\".format(d=first_doy_column))\n\n last_doy_index = predictors.shape[1] - 1\n last_doy_has_na = predictors.iloc[:, last_doy_index].isna().any()\n if last_doy_has_na:\n last_doy_column = predictors.columns[-1]\n predictors.drop(last_doy_column, axis=1, inplace=True)\n doy_series = doy_series[:-1]\n warn(\"\"\"Dropped temperature data for doy {d} due to missing data. Most likely from leap year mismatch\"\"\".format(d=last_doy_column))\n\n # Dont need the doy column if it's present and prediction is being done\n if for_prediction and 'doy' in observations.columns:\n observations = observations.drop('doy', axis=1)\n # Give each observation a temperature time series\n obs_with_temp = observations.merge(predictors, on=['site_id', 'year'], how='left')\n\n # Deal with any site/years that don't have temperature data\n original_sample_size = len(obs_with_temp)\n rows_with_missing_data = obs_with_temp.isnull().any(axis=1)\n missing_info = obs_with_temp[['site_id', 'year']][rows_with_missing_data].drop_duplicates()\n if len(missing_info) > 0:\n obs_with_temp.dropna(axis=0, inplace=True)\n n_dropped = original_sample_size - len(obs_with_temp)\n warn('Dropped {n0} of {n1} observations because of missing data'.format(n0=n_dropped, n1=original_sample_size) +\n '\\n Missing data from: \\n' + str(missing_info))\n\n temperature_array = obs_with_temp[doy_series].values.T\n\n if for_prediction:\n return temperature_array, doy_series\n else:\n observed_doy = obs_with_temp.doy.values\n return observed_doy, temperature_array, doy_series", "def load_data(city, month, day):\n \n# Using pandas accessor to find month, day, hour from the Start Time column in the source data\n print(\"A moment please while I find the data....\\n\")\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['month'] = df['Start Time'].dt.month\n df['hour'] = df['Start Time'].dt.hour\n\n return df", "def load_data(city, month, day):\n while month != \"\":\n # load data file into a dataframe\n filename = CITY_DATA[city]\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n # df['day_of_week'] = df['Start Time'].dt.day_name()\n\n\n try: df['day_of_week'] = df['Start Time'].dt.weekday_name\n except: df['day_of_week'] = df['Start Time'].dt.day_name()\n else: df['day_of_week'] = df['Start Time'].dt.weekday\n \n \n \n df['hour'] = df['Start Time'].dt.hour\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n # 
months = ['january', 'february', 'march', 'april', 'may', 'june','july','august','september','october','november','december']\n month = int(months.index(month)) + 1\n \n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city_input, month_input, day_input):\n # Read csv for city_input using CITY_DATA dictionary to create df\n df = pd.read_csv(CITY_DATA[city_input])\n\n # Convert 'Start Time' and 'End Time' columns in df to datetime with pd.to_datetime function\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # Include month number in df using dt.month\n df['Start Month'] = df['Start Time'].dt.month\n\n # Include weekday in df using dt.weekday_name - note its format, e.g. Monday\n df['Start Day'] = df['Start Time'].dt.weekday_name\n\n # Include hour in df using dt.hour\n df['Start Hour'] = df['Start Time'].dt.hour\n\n ## Month\n if month_input != 'all':\n # Create a list of months based on months indices using .index(element)\n MONTHS = ['january', 'february', 'march', 'april', 'may', 'june']\n # Python uses 0 indexing so we need to increase the values by 1 to correspond with month numbers\n month = MONTHS.index(month_input) + 1\n # Filter by month to create the new dataframe\n df = df[df['Start Month'] == month] # where month is the indexed version of the user input\n\n ## Day\n # Reformat day_input to Friday, for example\n day = day_input.title()\n\n if day != 'All':\n # Create a list of days\n DAYS = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday', 'All']\n # Filter by day of week to create the new dataframe\n if day != 'All':\n df = df[df['Start Day'] == day]\n\n # Replace 'Trip Duration' with calculated version\n # This felt simpler than converting the number of seconds into days, hours, minutes, seconds ;)\n df['Trip Duration'] = df['End Time'] - df['Start Time']\n\n # print(df.head(20))\n return df", "def load_metadata(self):\n self.meta[\"user_tables\"] = pd.read_sql(self.SQL[\"User Tables\"], self.engine)\n self.meta[\"all_tables\"] = pd.read_sql(self.SQL[\"All Tables\"], self.engine)\n self.meta[\"all_databases\"] = pd.read_sql(self.SQL[\"All Databases\"], self.engine)", "def populate_tables(connection: sqlite3.Connection) -> None:\n fake = Faker()\n Faker.seed(0)\n\n c = conn.cursor()\n\n number_of_courses = fake.pyint(min_value=5, max_value=20)\n\n for _ in range(number_of_courses):\n course_name = fake.word()\n\n insert_statement = f'insert into courses (name) values (\"{course_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n number_of_users = fake.pyint(min_value=1, max_value=23)\n\n Faker.seed()\n\n for _ in range(number_of_users):\n\n if fake.pybool():\n user_name = f'{fake.first_name_female()} {fake.last_name_female()}'\n else:\n user_name = f'{fake.first_name()} {fake.last_name()}'\n\n insert_statement = f'insert into users (name) values (\"{user_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n for _ in range(50000):\n Faker.seed()\n\n random_user_id = fake.pyint(1, number_of_users)\n random_course_id = fake.pyint(1, number_of_courses)\n Faker.seed()\n random_lesson_no = fake.pyint(3, 12)\n Faker.seed()\n random_exercise_no = fake.pyint(1, 50)\n random_data = fake.sentence()\n\n insert_statement = f\"\"\"insert into saves 
(user_id, course_id, lesson_no, exercise_no,data) \n values ({random_user_id}, {random_course_id}, {random_lesson_no}, \n {random_exercise_no}, '{random_data}');\"\"\"\n c.execute(insert_statement)\n\n connection.commit()", "def load_wdi() -> None:\n\n log.info(\"Started loading WDI.\")\n df = _load_and_stage_wdi()\n\n schema = \"wdi_202005\"\n db.drop_schema(schema)\n db.create_schema(schema)\n\n # Push completely raw but staged data\n log.debug(\"Done preparing raw WDI, pushing.\")\n fqtables = [f\"{schema}.cy_unimp_part_{p}\" for p in [1, 2]]\n db.df_to_split_db(df=df, fqtables=fqtables)\n\n _fill_and_push_wdi(df=df, schema=schema, n_imp=5)\n log.info(\"Finished loading WDI.\")", "def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather", "def set_data(self, year, month):\n data = _download_to_df(self.url, self.table_name, year, month)\n data = data.loc[:, self.table_columns]\n with self.con:\n data.to_sql(self.table_name, con=self.con, if_exists='replace', index=False)\n self.con.commit()", "def generate_fake_data(context, db_connection):\n db_cursor = db_connection.cursor()\n\n for table in context.tables:\n faker = fake_data_gen.Faker(table, context)\n _fill_table(table, faker, db_cursor)\n db_connection.commit()\n\n db_cursor.close()", "def load_data(city, month, day):\n\n print(\"\\nWe are loading the information for the selected filters.\")\n start_time = time.time()\n\n # filter the data according to the selected city\n if isinstance(city, list):\n df = pd.concat(map(lambda city: pd.read_csv(CITY_DATA[city]), city),\n sort=True)\n # reorganize DataFrame columns after a city concat\n try:\n df = df.reindex(columns=['Unnamed: 0', 'Start Time', 'End Time',\n 'Trip Duration', 'Start Station',\n 'End Station', 'User Type', 'Gender',\n 'Birth Year'])\n except:\n pass\n else:\n df = pd.read_csv(CITY_DATA[city])\n\n # create columns to see the statistics\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # filter month and weekday see the data in two new DataFrames\n if isinstance(month, list):\n df = pd.concat(map(lambda month: df[df['Month'] ==\n (months.index(month)+1)], month))\n else:\n df = df[df['Month'] == (months.index(month)+1)]\n\n if isinstance(day, list):\n df = pd.concat(map(lambda day: df[df['day_of_week'] ==\n (day.title())], day))\n else:\n df = df[df['day_of_week'] == day.title()]\n\n print(\"\\nThis took {} seconds.\".format((time.time() - start_time)))\n print('-'*40)\n\n return df", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")" ]
[ "0.6654514", "0.66226166", "0.6348557", "0.632052", "0.62991303", "0.62082505", "0.61876976", "0.6142009", "0.60627335", "0.6002963", "0.5983051", "0.59750384", "0.5964339", "0.5926051", "0.59136045", "0.59097177", "0.58774203", "0.5861508", "0.5857061", "0.5843847", "0.58242774", "0.58109164", "0.58074266", "0.5801949", "0.57935333", "0.57928663", "0.5791196", "0.5760634", "0.57488865", "0.5742264", "0.57342136", "0.57302773", "0.57123727", "0.5710525", "0.5705605", "0.57034856", "0.5700655", "0.5695572", "0.5690213", "0.5680076", "0.5660745", "0.5651602", "0.5647642", "0.56444156", "0.5639949", "0.5622278", "0.558798", "0.5572142", "0.55607635", "0.5548546", "0.5548171", "0.55465627", "0.55456936", "0.5512822", "0.5512715", "0.5511217", "0.550549", "0.5504847", "0.55042017", "0.54941374", "0.54896235", "0.5459899", "0.5448563", "0.54432505", "0.5442965", "0.5441291", "0.543846", "0.54333615", "0.54299766", "0.5425626", "0.54237247", "0.54230624", "0.54210615", "0.5419529", "0.5412184", "0.5400605", "0.5378381", "0.53771275", "0.53707474", "0.53680986", "0.5367555", "0.5366793", "0.53660566", "0.53658164", "0.53618175", "0.53607786", "0.53498614", "0.5347209", "0.5346267", "0.53462064", "0.5345792", "0.5345397", "0.53384537", "0.53377444", "0.5336784", "0.5336686", "0.53362316", "0.53319883", "0.5330822", "0.53300315" ]
0.78376013
0
Creates and populates water and weather database. Returns None
def create_databases(): db_connection = connect_to_db() # Create database tables. create_tables(db_connection) # Populate water tables. populate_water_tables(db_connection) # station_data = get_station_data() # station = station_data.query('ŠIFRA == 30301') # print(station) # index = station.index[0] # lat = station.at[index, 'LAT'] # lng = station.at[index, 'LON'] # name = f"{station.at[index, 'VODOMERNA POSTAJA']} ({station.at[index, 'VODOTOK']})" # print(index, lat, lng, name) # Populate location tables # populate_locations(db_connection) # Populate weather tables populate_weather(db_connection) db_connection.commit() db_connection.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_count:\n print('Weather tables already populated!')\n return\n\n print('WEATHER:')\n\n # Darksky data\n for dir_name, location in metadata.items():\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"','\".join(location[water_body])}')''')\n break\n\n dir_path = get_data_path('weather', 'raw', dir_name)\n for json_file_name in os.listdir(dir_path):\n json_path = os.path.join(dir_path, json_file_name)\n with open(json_path, 'r', encoding='utf-8') as json_file:\n print(f'\\t\\tPopulating year: {json_file_name[0:-5]}')\n year_forecasts = json.load(json_file)\n for date, date_forecast in year_forecasts.items():\n hourly_forecasts = date_forecast['hourly']\n\n if not hourly_forecasts:\n print(f'\\t\\tNo hourly forecasts for {date}!')\n continue\n\n daily_forecast = {\n 'location_id': location_id,\n 'time': date_forecast['time'],\n 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],\n 'precipitation': 0,\n 'snow_accumulation': 0\n }\n # List of value names with `avg`, `min` and `max` values\n value_names = {\n 'temperature': 'temperature',\n 'cloud_cover': 'cloudCover',\n 'dew_point': 'dewPoint',\n 'humidity': 'humidity',\n 'pressure': 'pressure',\n 'uv_index': 'uvIndex',\n 'precipitation_probability': 'precipProbability',\n 'precipitation_intensity': 'precipIntensity'\n }\n # Value name counters, which indicate how many times (out of 24)\n # certain value appears in hourly data.\n value_counts = {k: 0 for k in value_names.keys()}\n\n for value_name in value_names.keys():\n daily_forecast[f'{value_name}_avg'] = 0.0\n daily_forecast[f'{value_name}_min'] = float('inf')\n daily_forecast[f'{value_name}_max'] = float('-inf')\n\n # Calculate daily forecast values from hourly forecasts.\n for hourly_forecast in hourly_forecasts:\n for value_name in value_names.keys():\n orig_value_name = value_names[value_name]\n if is_forecast_number(orig_value_name, hourly_forecast):\n daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]\n daily_forecast[f'{value_name}_min'] = min(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_min']\n )\n daily_forecast[f'{value_name}_max'] = max(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_max']\n )\n value_counts[value_name] += 1\n\n if is_forecast_number('precipAccumulation', hourly_forecast) \\\n and hourly_forecast['precipType'] == 'snow':\n daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']\n elif is_forecast_number('precipIntensity', hourly_forecast) \\\n and is_forecast_number('precipProbability', hourly_forecast):\n daily_forecast['precipitation'] += \\\n hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']\n\n for value_name, value_count in value_counts.items():\n if value_count:\n # Calculate average.\n 
daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count\n else:\n # If value never appeared\n daily_forecast[f'{value_name}_avg'] = 'NULL'\n daily_forecast[f'{value_name}_min'] = 'NULL'\n daily_forecast[f'{value_name}_max'] = 'NULL'\n\n cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})\n VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')\n\n # IOT data:\n for location in SETTINGS['weather_locations_iot']:\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"', '\".join(location[water_body])}')''')\n\n # Set locations for all stations on given water body to match its location.\n cursor.execute(f'''SELECT id\n FROM {water_body}s\n WHERE location_id = {location_id}''')\n ids = [row[0] for row in cursor.fetchall()]\n if len(ids):\n cursor.execute(f'''UPDATE {water_body}_stations\n SET location_id = {location_id}\n WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')\n\n break \n \n file_name = f'''{location['lat']}-{location['lng']}.json'''\n json_path = get_data_path('weather', 'raw', file_name)\n\n # If data file doesn't exist, download it first.\n if not os.path.isfile(json_path):\n with open(json_path, 'wb', encoding=\"utf-8\") as file:\n file.write(read_from_url(location['url'], decode=False))\n \n with open(json_path, 'r', encoding='utf-8') as json_file:\n row_names = {\n \"Sun_duration\": \"sun_duration\",\n \"CloudCover\": \"cloud_cover_avg\",\n \"Percipitation\": \"precipitation\",\n \"New_snow_blanket\": \"snow_accumulation\",\n \"Snow_blanket\": \"snow_depth\",\n \"TemperatureAvg\": \"temperature_avg\",\n \"TemperatureMin\": \"temperature_min\",\n \"TemperatureMax\": \"temperature_max\"\n }\n forecasts = json.load(json_file)\n for forecast in forecasts:\n f = {row_names[k]: forecast[k] for k in row_names.keys()}\n f['location_id'] = location_id\n f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)\n cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})\n VALUES ({', '.join([str(v) for v in f.values()])})''')", "def create_db(temp: list, rain: list, humidity: list, wind: list) -> dict:\r\n weather = {}\r\n for i in range(len(temp)):\r\n weather[i+1] = [temp[i], rain[i], humidity[i], wind[i]]\r\n return weather", "def populate_database(telescope_name, instrument_name):\n telescope = Telescope.objects.create(\n name=telescope_name, latitude=25.0, longitude=45.0)\n instrument = Instrument.objects.create(\n name=instrument_name, telescope=telescope)\n for year_int in (2012, 2013):\n for month_int in range(1, 13):\n for night_int in (1, monthrange(year_int, month_int)[1]):\n ut_date = date(year_int, month_int, night_int)\n night = Night.objects.create(\n ut_date=ut_date, instrument=instrument, observers='Smith')\n Exposure.objects.create(\n night=night, run_number=1, ut_start=time(10, 0, 0),\n exposed=20.0, ra=60.0, dec=30.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=2, ut_start=time(11, 0, 0),\n exposed=30.0, ra=90.0, dec=0.0, object_exp=True)\n Exposure.objects.create(\n night=night, run_number=3, ut_start=time(12, 0, 0),\n 
exposed=40.0, ra=120.0, dec=-30.0, object_exp=False)", "def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')\n aquifer_count = cursor.fetchone()[0]\n\n if watercourse_count and aquifer_count:\n print('Water tables already populated!')\n return\n\n station_data = get_station_data()\n\n for archive in metadata.keys():\n print(f'{archive}-water:'.upper())\n water_body = get_water_definitions(archive)['body']\n\n # 1. Populate watercourses/aquifers:\n stations = {}\n for water_body_name in metadata[archive].keys():\n print(f'\\tPopulating {water_body}: \"{water_body_name}\"')\n cursor.execute(f'''INSERT INTO {water_body}s(location_id, name)\n VALUES (0, '{water_body_name}')''')\n water_body_id = cursor.lastrowid\n\n # 2. Populate watercourse_stations/aquifer_stations:\n for station_id in metadata[archive][water_body_name]['stations']:\n station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name'])\n\n if station_id in stations:\n # Prefer watercourses/aquifer with more stations\n current_len = len(metadata[archive][water_body_name]['stations'])\n previous_len = len(metadata[archive][stations[station_id]]['stations'])\n\n if current_len < previous_len:\n print(f'\\t\\tStation already exists: {station_id} - \"{station_name}\" (\"{water_body_name}\")')\n continue\n else:\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station: {station_id} - \"{station_name}\" from \"{stations[station_id]}\")')\n\n stations[station_id] = water_body_name\n print(f'\\t\\tPopulating station: {station_id} - \"{station_name}\"')\n\n # Insert station location if station data exists.\n location_id = 0\n station_row = station_data.query(f'ŠIFRA == \"{station_id}\"')\n if not station_row.empty:\n index = station_row.index[0]\n lat = station_row.at[index, 'LAT']\n lng = station_row.at[index, 'LON']\n if not np.isnan(lat) and not np.isnan(lng):\n name = f\"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})\"\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{name}', {lat}, {lng})''')\n location_id = cursor.lastrowid\n\n # Insert station.\n cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name)\n VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''')\n\n # 3. 
Populate watercourse_measurements/aquifer_measurements:\n if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'],\n station_id):\n cursor.execute(f'''DELETE \n FROM {water_body}_stations\n WHERE id = {station_id}''')\n print(f'\\t\\tRemoved station with useless data: {station_id} - \"{station_name}\"')\n\n # Remove empty watercourses/aquifers.\n cursor.execute(f'''SELECT w.id, w.name\n FROM {water_body}s w\n WHERE NOT EXISTS (\n SELECT s.id \n FROM {water_body}_stations s \n WHERE w.id = s.{water_body}_id\n )''')\n\n for row in cursor.fetchall():\n cursor.execute(f'''DELETE \n FROM {water_body}s\n WHERE id = {row[0]}''')\n print(f'\\tRemoved empty {water_body}: \"{row[1]}\"')", "def update_weather(location_request, db):\n with open(expanduser(\"~/bin/my_utilities/config/darksky-key\")) as f:\n ds_key = f.readline().strip()\n current = []\n current_day = 0\n with forecast(ds_key, *location_request, units=\"uk2\") as location:\n raw = location['hourly']['data'][0]\n current.append(datetime.datetime.now().hour)\n current.append(day_relative_to_absolute(current_day))\n current.append(raw[\"temperature\"])\n current.append(raw[\"apparentTemperature\"])\n current.append(raw[\"precipIntensity\"])\n current.append(raw[\"precipProbability\"] * 100)\n current.append(raw[\"humidity\"] * 100)\n current.append(raw[\"dewPoint\"])\n current.append(raw[\"windSpeed\"])\n current.append(raw[\"windBearing\"])\n current.append(raw[\"windGust\"])\n current.append(raw[\"pressure\"])\n current.append(raw[\"cloudCover\"] * 100)\n current.append(raw[\"uvIndex\"])\n current.append(raw[\"visibility\"])\n current = format_list_for_db(current)\n\n columns = [\"hour\", \"day\", \"temp\", \"apptemp\", \"precipint\", \"precipprob\",\n \"humidity\", \"dewpoint\", \"windspeed\", \"windbearing\",\n \"windgust\", \"pressure\", \"cloudcover\", \"uvindex\", \"visibility\"]\n columns = format_list_for_db(columns)\n statement = f\"INSERT INTO WEATHER {columns} VALUES {current}\"\n print(statement)\n cursor = db.cursor()\n cursor.execute(statement)\n cursor.close()", "def prepare_database(self, waterscenario=None, trafficscenario=None):\n\n # Validate input\n if waterscenario:\n waterscenario = Path(waterscenario)\n assert waterscenario.exists(), 'Waterscenario file not found'\n\n BIVAS = pyBIVAS(self.BIVAS_database)\n df_trafficscenarios = BIVAS.trafficscenario_numberoftrips()\n\n\n # Do changes to database:\n con = sqlite3.connect(self.BIVAS_database)\n c = con.cursor()\n\n # Update waterscenario with given file\n if waterscenario:\n # Delete current water_scenario_values\n sql = \"DELETE FROM water_scenario_values WHERE 1\"\n c.execute(sql)\n\n sql = \"DELETE FROM water_scenarios WHERE 1\"\n c.execute(sql)\n\n # Write waterdata to database\n\n # Read waterscenario file\n df = pd.read_csv(waterscenario, header=0, index_col=None)\n df = df[['ArcID', 'SeasonID', 'WaterLevel__m', 'RateOfFlow__m3_s', 'WaterSpeed__m_s', 'WaterDepth__m']]\n df['WaterScenarioID'] = 1\n\n # Add new water_scenario\n df.to_sql('water_scenario_values', con,\n if_exists='append', index=False)\n\n # Rename water_scenario\n # waterscenario_name = waterscenario.stem\n # sql = \"\"\"UPDATE water_scenarios SET Description = \"{}\" WHERE ID = {}\"\"\".format(\n # waterscenario_name, waterscenario)\n # c.execute(sql)\n\n\n waterscenario_id = 1\n waterscenario_name = 'TEST waterscenario'\n waterscenario_type = 1\n sql = \"\"\"INSERT into water_scenarios VALUES ({}, '{}', {})\"\"\".format(\n waterscenario_id,\n 
waterscenario_name,\n waterscenario_type\n )\n c.execute(sql)\n\n # Remove water scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n sql = \"\"\"UPDATE parameters SET WaterScenarioID = 1 WHERE 1\"\"\"\n c.execute(sql)\n\n else:\n # Remove water scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n sql = \"\"\"UPDATE parameters SET WaterScenarioID = NULL WHERE 1\"\"\"\n c.execute(sql)\n\n # Set scenario name and description\n date_string = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.description = f'Date: {date_string}, Waterscenario: {waterscenario}, TrafficScenario: {trafficscenario},'\n\n sql = \"\"\"\n UPDATE scenarios\n SET Name = \"{}\",\n Description = \"{}\"\n WHERE ID = {}\n \"\"\".format(\n self.scenarioName, self.description, self.scenarioID)\n c.execute(sql)\n\n # Update traffic Scenario. I'm simply updating all scenarios\n # Otherwise I should check the BranchSet structure\n if trafficscenario:\n if isinstance(trafficscenario, int):\n sql = \"\"\"UPDATE parameters SET TrafficScenarioID = \"{}\" WHERE 1\"\"\".format(trafficscenario)\n c.execute(sql)\n else:\n trafficScenarioID = df_trafficscenarios.index[df_trafficscenarios['Description'] == trafficscenario][0]\n sql = \"\"\"UPDATE parameters SET TrafficScenarioID = \"{}\" WHERE 1\"\"\".format(trafficScenarioID)\n c.execute(sql)\n\n con.commit()\n con.close()\n\n logger.info('BIVAS database copied and updated')", "def store_weather(data: List[WeatherData]):\n insert_cmd = 'INSERT INTO weather VALUES (?, ?, ?)'\n with closing(sqlite3.connect('weather.db')) as conn:\n with closing(conn.cursor()) as cursor:\n cursor.executemany(insert_cmd, data)\n conn.commit()", "def create_table(self):\n # Connect to database\n conn = sqlite3.connect(self)\n # Create a cursor\n c = conn.cursor()\n\n # Create a Table\n c.execute(\"\"\"CREATE TABLE weather (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n sensor text,\n location text,\n temperature real,\n description text,\n time text\n )\"\"\")\n # Commit our command\n conn.commit()\n # Close our connection\n conn.close()", "def grasspi_add_db(table_name,row):\n\n if table_name == \"weatherdata\":\n\tconn = sqlite3.connect(grasspi_config.cfg.db_file)\n \tc = conn.cursor()\n\tc.execute('INSERT INTO ' + table_name + ' values (?,?,?,?,?,?,?,?,?,?,?,?,?)',[row[\"date\"],row[\"time\"],\n \trow[\"current_temp\"],row[\"current_rain\"],row[\"total_rain\"],row[\"current_wind_speed\"],\n \trow[\"current_wind_direction\"],row[\"current_humidity\"],row[\"current_air_pressure\"],\n \trow[\"current_shortwave_rad\"],row[\"current_atm_rad\"],row[\"day_length\"],row[\"elevation\"]])\n elif table_name == \"wateringschedule\":\n\tconn = sqlite3.connect(grasspi_config.cfg.db_file, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n \tc = conn.cursor()\n\tc.execute('INSERT INTO ' + table_name + ' values (?,?,?)',[row[\"zonenumber\"],\n\trow[\"starttime\"],row[\"duration\"]])\n # Save (commit) the changes\n conn.commit()\n # We can also close the cursor if we are done with it\n c.close()", "def initial_db_setup() -> None:\n db_filename = \"twdft.db\"\n db_path = os.path.join(TWDFT_DATA_DIR, db_filename)\n csv_filename = \"sites.csv\"\n csv_path = os.path.join(TWDFT_DATA_DIR, csv_filename)\n db_is_new = not os.path.exists(db_path)\n sites_csv = os.path.join(TWDFT_DATA_DIR, csv_filename)\n\n if db_is_new:\n with sqlite3.connect(db_path) as conn:\n c = conn.cursor()\n\n # first we create a site object\n c.execute(\n 
\"\"\"\n CREATE TABLE site(\n id INTEGER PRIMARY KEY,\n name TEXT,\n site_type TEXT,\n sub_category TEXT,\n address_1 TEXT,\n address_2 TEXT,\n town TEXT,\n county TEXT,\n country TEXT,\n postcode TEXT,\n site_category TEXT,\n freq_target TEXT,\n created TEXT,\n notes TEXT,\n last_inspection TEXT,\n next_inspection TEXT,\n pfsp_approval TEXT,\n pfsp_expiry TEXT,\n unlocode TEXT,\n pfso TEXT,\n pso TEXT,\n pfsa_approval TEXT,\n pfsa_expiry TEXT,\n team TEXT,\n created_by TEXT,\n last_updated TEXT,\n updated_by TEXT,\n afp_loc TEXT,\n rdf TEXT,\n classification TEXT,\n article24 TEXT,\n psa_approval TEXT,\n inspection_due TEXT\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspection table\n\n c.execute(\n \"\"\"\n CREATE TABLE inspection(\n id INTEGER PRIMARY KEY,\n site INTEGER,\n date TEXT,\n status TEXT,\n time TEXT,\n FOREIGN KEY(site) REFERENCES site(id)\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspector table\n c.execute(\n \"\"\"\n create table inspector(\n id integer primary key,\n first_name text,\n last_name text\n )\n \"\"\"\n )\n conn.commit()\n\n for i in INSPECTORS:\n first = i.split(\" \")[0]\n last = i.split(\" \")[1]\n c.execute(\n \"INSERT INTO inspector(first_name, last_name) VALUES (?,?)\",\n (first, last),\n )\n\n # a table that links inspectors with inspections\n c.execute(\n \"\"\"\n CREATE TABLE inspector_inspections(\n inspector INTEGER,\n inspection INTEGER,\n FOREIGN KEY (inspector) REFERENCES inspector(id),\n FOREIGN KEY (inspection) REFERENCES inspection(id)\n )\n \"\"\"\n )\n conn.commit()\n\n for site in map(Site._make, csv.reader(open(csv_path, \"r\"))):\n try:\n c.execute(\n f\"\"\"\n INSERT INTO site VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",\n (\n int(site.id.replace(\",\", \"\")),\n site.name,\n site.site_type,\n site.sub_category,\n site.address_1,\n site.address_2,\n site.town,\n site.county,\n site.country,\n site.postcode,\n site.site_category,\n site.freq_target,\n site.created,\n site.notes,\n site.last_inspection,\n site.next_inspection,\n site.pfsp_approval,\n site.pfsp_expiry,\n site.unlocode,\n site.pfso,\n site.pso,\n site.pfsa_approval,\n site.pfsa_expiry,\n site.team,\n site.created_by,\n site.last_updated,\n site.updated_by,\n site.afp_loc,\n site.rdf,\n site.classification,\n site.article24,\n site.psa_approval,\n site.inspection_due,\n ),\n )\n except sqlite3.IntegrityError as e:\n print(\"That hasnae worked\", site.inspection_due)", "def load_wdi() -> None:\n\n log.info(\"Started loading WDI.\")\n df = _load_and_stage_wdi()\n\n schema = \"wdi_202005\"\n db.drop_schema(schema)\n db.create_schema(schema)\n\n # Push completely raw but staged data\n log.debug(\"Done preparing raw WDI, pushing.\")\n fqtables = [f\"{schema}.cy_unimp_part_{p}\" for p in [1, 2]]\n db.df_to_split_db(df=df, fqtables=fqtables)\n\n _fill_and_push_wdi(df=df, schema=schema, n_imp=5)\n log.info(\"Finished loading WDI.\")", "def sql_functions(cities, weather):\n con = lite.connect(\"sql_database.db\")\n tables_tuple = (\"cities\", \"weather\")\n with con:\n #Generate tables in database:\n cur = con.cursor() #Get cursor object\n for table in tables_tuple:\n cur.execute(\"DROP TABLE IF EXISTS {0}\".format(table)) #Drop tables if they already exist.\n cur.execute(\"CREATE TABLE cities (name text, state text)\")\n cur.execute(\"CREATE TABLE weather (city text, year integer, warm_month text, cold_month text, average_high integer)\")\n #Populate tables in database:\n cur.executemany(\"INSERT INTO cities VALUES 
(?,?)\", cities)\n cur.executemany(\"INSERT INTO weather VALUES (?,?,?,?,?)\", weather)\n #Retrieve data from database:\n cur.execute(\"SELECT * FROM cities INNER JOIN weather ON city = name\")\n rows = cur.fetchall()\n cols = [desc[0] for desc in cur.description]\n output_dataframe = pd.DataFrame(rows, columns = cols)\n \n return output_dataframe", "def prepare_db():\n conn = sqlite.connect(\"temp.db\")\n sql = conn.cursor()\n sql.execute(\"SELECT sql FROM sqlite_master WHERE name='points'\")\n rows = sql.fetchall()\n if len(rows) == 0:\n print \"Database does not exist. Creating Database...\"\n sql.execute('''CREATE TABLE points\n (date datetime, humidity real, temp_c real, temp_f real, index_c real, index_f)''')\n print \"Database created\"\n conn.close()", "def init_db() -> None:\n conn = sqlite3.connect('../Utils/map_storage.db')\n cursor = conn.cursor()\n\n with conn:\n station_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n nodes(city TEXT, name TEXT, is_station TEXT, x INT, y INT, zone TEXT)\"\"\"\n\n cursor.execute(station_cmd)\n\n connection_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n connections(city TEXT, name_1 TEXT, name_2 TEXT, color TEXT)\"\"\"\n\n cursor.execute(connection_cmd)", "def _setupWeather(self, w, config):\n wnames = ('cloud', 'seeing')\n if w not in wnames:\n raise Exception('w should be one of %s' %(wnames))\n filename = config['%s_datafile' %(w)]\n file = open(filename, 'r')\n # Also assume flat file contains only date / value in a space or tab separated file. \n self.dates[w] = []\n self.weather[w] = []\n # Read the data file.\n print '# Reading weather data file %s' %(filename)\n for line in file:\n if line.startswith('#') | line.startswith('!'):\n continue\n self.dates[w].append(line.split()[0])\n self.weather[w].append(line.split()[1])\n file.close()\n self.dates[w] = numpy.array(self.dates[w], float)\n self.weather[w] = numpy.array(self.weather[w], float)\n # Check the total amount of data (mostly for user awareness):\n print '# Read %d weather values from %s file. ' %(len(self.weather[w]), filename)\n # Check that weather data is monotonically increasing in time. \n if not(numpy.all(numpy.diff(self.dates[w]))):\n order = self.dates[w].argsort()\n self.weather[w] = self.weather[w][order]\n self.dates[w] = self.dates[w][order]\n # Get the total length of time included in this (seeing/cloud) file,\n # so that we can determine a wrap-around date if we need that.\n self.maxtime[w] = self.dates[w].max()\n return", "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()", "def save (self) :\n\n try:\n # This opens and closes the connections to the database.\n # Since writings occur only every 10 minutes it wouldn't be efficient to\n # let the connection open.\n db = MySQLdb.connect(\n host=config.db_host, db=config.db_name, user=config.db_user, passwd=config.db_password)\n cur = db.cursor()\n\n # get the reservoir height to be able to measure the water level\n sqlQuery = \"SELECT heigth FROM main_reservoir WHERE res_id = {}\".format(self.reservoir)\n cur.execute(sqlQuery)\n # self.waterGap is the height of the part of the reservoir\n # that's out of water. 
reservoir height minus that value will give the actual water level\n reservoirHeight = cur.fetchone()[0]\n waterLevel = reservoirHeight - self.waterGap\n\n sqlQuery = \"\"\"\n INSERT INTO main_measurement (packetNr, waterLevel, pH, conductivity, reservoir_id, dateTime, salinity, tds)\n VALUES ({}, {}, {}, {}, {}, now(), {}, {})\n \"\"\".format(\n self.packetNr,\n waterLevel,\n self.pH,\n self.conductivity,\n self.reservoir,\n self.salinity,\n self.tds)\n cur.execute(sqlQuery)\n\n db.commit()\n db.close()\n except Exception as e:\n print('[Measurement#save] failed to save instance: {}'.format(e))", "def create_db(self):\n\t\tcur, conn = self.open_connection()\n\n\t\t# this creates the meta table\n\t\tcommand = \"\"\" DROP TABLE IF EXISTS main;\n\t\tCREATE TABLE meta (\n\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tUT_date TEXT,\n\t\t\ttime_of_obs TEXT,\n\t\t\tobject_name TEXT,\n\t\t\tintegration_time FLOAT,\n\t\t\tgrating INTEGER,\n\t\t\tcentral_wavelength FLOAT,\n\t\t\tslit_width INTEGER,\n\t\t\tphase_angle FLOAT,\n\t\t\tcomments TEXT\n\t\t\t);\"\"\"\n\n\t\tcur.executescript(command)\n\n\t\t# this creates the spectra table\n\t\tcommand = \"\"\" DROP TABLE IF EXISTS spectrum;\n\t\tCREATE TABLE spectra (\n\t\t\tspec_id INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tid INTEGER,\n\t\t\twave array,\n\t\t\tspectrum array,\n\t\t\tFOREIGN KEY(id) REFERENCES meta(id)\n\t\t\t);\"\"\"\n\n\t\tcur.executescript(command)\n\n\t\tconn.commit()\n\t\tconn.close()", "def _populate():\n models.Base.metadata.create_all(engine)\n logger.info(\"Initalized database\")\n db = Session()\n\n hermann = models.Account(id=\"test\",\n name=\"Hermann Dörkschneider\",\n email=\"[email protected]\")\n db.add(hermann)\n\n journey = models.Journey(id=str(uuid.uuid4()),\n account_id=\"test\",\n visibility=\"PUBLIC\",\n start_time_utc=datetime.datetime.now(),\n stop_time_utc=datetime.datetime.now())\n db.add(journey)\n\n waypoint1 = models.Waypoint(journey=journey,\n time_utc=datetime.datetime.now(),\n accuracy_m=2.71,\n latitude=3.1416,\n longitude=1.618,\n height_m=10)\n db.add(waypoint1)\n\n waypoint2 = models.Waypoint(journey=journey,\n time_utc=datetime.datetime.now(),\n accuracy_m=5.1,\n latitude=3.1410,\n longitude=1.620,\n height_m=5)\n db.add(waypoint2)\n\n db.commit()\n logger.info(\"Created test account {}\".format(hermann))\n logger.info(\"Created test journey {}\".format(journey))", "def populate_t_database():\n with open('minerals.json') as file:\n file = json.loads(file.read())\n\n for mineral in file[:22]:\n mineral_entry = Mineral.objects.get_or_create(**mineral)", "def init_sensor_db(self, drop_tables=True):\n logger.debug(\"Creating Database Engine.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n\n if drop_tables:\n logger.debug(\"Drop system table if within the existing database.\")\n Base.metadata.drop_all(db_engine)\n\n logger.debug(\"Creating Sentinel1ASF Database.\")\n Base.metadata.bind = db_engine\n Base.metadata.create_all()", "def get_data(traffic, weather, db):\r\n traffic_data = traffic.get_traffic()\r\n weather_data = weather.get_weather()\r\n db.traffic.insert_many(traffic_data)\r\n db.weather.insert_many(weather_data)\r\n print('于 {} 完成数据爬取及存储'.format(datetime.now()))\r\n print('当前交通数据条数 {}'.format(db.traffic.count_documents({})))\r\n print('当前天气数据条数 {}'.format(db.weather.count_documents({})))", "def setup_database() -> sqlite3.Cursor:\n conn = sqlite3.connect(':memory:')\n cursor = conn.cursor()\n\n insert_books(cursor)\n insert_lookups(cursor)\n\n return cursor", "def 
populate_water_measurements(cursor, archive, directory, station):\n csv_path = get_data_path(\n 'water',\n 'raw',\n archive,\n directory,\n f'{station}.csv'\n )\n\n with open(csv_path, 'r', encoding='utf-8') as file:\n reader = csv.reader(file, delimiter=';')\n header = next(reader)\n column_names_map = get_water_index_map(archive, header)\n\n if not column_names_map:\n return False\n\n water_body = get_water_definitions(archive)['body']\n\n for row in reader:\n column_values_map = get_water_value_map(row, column_names_map)\n if column_values_map:\n date = datetime.strptime(row[0], '%d.%m.%Y').date()\n data_columns = ', '.join(column_values_map.keys())\n data_values = ', '.join(column_values_map.values())\n cursor.execute(f'''INSERT INTO {water_body}_measurements (station_id, date, {data_columns})\n VALUES ({station}, '{str(date)}', {data_values})''')\n\n return True", "def weather_script(\n init_data_path: str,\n output_path: str,\n workers: int,\n weatherAPI_rpm: int,\n geoAPI_rpm: int,\n) -> None:\n unzip(init_data_path, output_path)\n top_hotels_dataframe_without_addresses = primary_data_proc(output_path)\n geocoder = geocoder_setup(geoAPI_rpm)\n top_hotels_df_with_addresses = define_address(\n top_hotels_dataframe_without_addresses,\n workers,\n geocoder,\n )\n cities, countries, coordinates = city_center_coord(top_hotels_df_with_addresses)\n weather_df = pd.concat(\n [\n prev_weather(cities, countries, coordinates, workers, weatherAPI_rpm),\n forecast_weather(cities, countries, coordinates, workers, weatherAPI_rpm),\n ]\n )\n\n logger.info(\"Start to save results\")\n save_main_info(output_path, weather_df, top_hotels_df_with_addresses)\n logger.info(\"Finish\")", "def weather():\r\n def weather_api_call():\r\n with open('config.json', 'r') as conf:\r\n conf = json.load(conf)\r\n # Gets the API key from the config.json file\r\n weather_api_key = conf[\"weather_api_key\"]\r\n weather_city_name = conf['weather_city_name']\r\n response = requests.get(\r\n 'http://api.openweathermap.org/data/2.5/weather?'\r\n 'q=' + weather_city_name + '&units=metric&appid=' + weather_api_key)\r\n resp_json = response.json()\r\n with open('weather.json', 'w') as outfile:\r\n # Uses the data from the API to overwrite the weather data\r\n json.dump(resp_json, outfile)\r\n outfile.close()\r\n\r\n def weather_data_extractor():\r\n with open('weather.json', 'r') as weather_json:\r\n weather_json = json.load(weather_json)\r\n temp = weather_json[\"main\"]\r\n weather_item = weather_json[\"weather\"]\r\n desc = weather_item[0]\r\n current_temperature = \"The current temperature is: \" + \\\r\n str(int(temp[\"temp\"])) + \"C\"\r\n current_feels_like = \"Feels like: \" + \\\r\n str(int(temp[\"feels_like\"])) + \"C\"\r\n forecast = desc[\"main\"]\r\n return current_feels_like, current_temperature, forecast\r\n\r\n weather_api_call()\r\n return weather_data_extractor()", "def init_and_update_observe_table(self):\n # print CHN_CITY_LIST_FILE\n location = ''\n id = ''\n f = open(CHN_CITY_LIST_FILE, 'r')\n for line in f.readlines():\n line_list = line.strip('\\n').split(':')\n location = line_list[0]\n id = line_list[1]\n pm = get_pm(location)\n # get current weather\n weather_dict = get_open_weather(id)\n if weather_dict not in ('', None, [], {}):\n if 'error' in pm or pm == False:\n weather_dict['aqi'] = '无数据'#'N/A'\n else:\n weather_dict['aqi'] = pm['quality'] + '(' + str(pm['aqi']) + ')'\n db_record = self.db.search_observe_record(str(id))\n # db_record = []\n now_date = get_local_format_time()\n if db_record != 
[]:#update\n self.db.update_observe_data(weather_dict['ptime'],weather_dict['time'],now_date,weather_dict['WD'],weather_dict['WS'],weather_dict['SD'],weather_dict['weather'],weather_dict['img1'],weather_dict['img2'],weather_dict['temp'],weather_dict['temp1'],weather_dict['temp2'],weather_dict['aqi'],id)\n else:#insert\n self.db.insert_observe_data(id,weather_dict['city'],weather_dict['ptime'],weather_dict['time'],now_date,weather_dict['WD'],weather_dict['WS'],weather_dict['SD'],weather_dict['weather'],weather_dict['img1'],weather_dict['img2'],weather_dict['temp'],weather_dict['temp1'],weather_dict['temp2'],weather_dict['aqi'])\n f.close()\n return True", "def get_gis_historical_data():\n logging.info(\"Generating baseline reference and historical weather data.\")\n \n # Initialising function variables\n fake = Faker()\n geolocator = Nominatim()\n config_data = get_config()\n locations = config_data[\"location\"]\n \n # Check if there are no duplicate locations in the config.yaml file.\n if len(locations) != len(set(locations)):\n logging.error(\"Duplicate location found. Please check config.yaml file.\")\n raise ValueError\n \n # Initialise pandas dataframe column name for baseline reference\n # and historical data.\n df_ref = pd.DataFrame(columns=[\"Location\", \"Latitude\"\n ,\"Longitude\", \"Elevation\"\n ,\"Timezone\"])\n df_hist = pd.DataFrame(columns=[\"Location\", \"Date\"\n ,\"Month\", \"Temperature_Min\"\n ,\"Temperature_Max\", \"Humidity\"\n ,\"Pressure\"])\n \n # Generate weather data for each location.\n for idx, loc in enumerate(locations):\n \n logging.info(\"Retrieving geolocation data for {}.\".format(loc))\n \n # Retrieving geolocation data from geopy library.\n loc_data = geolocator.geocode(loc)\n \n logging.info(\"Check if the location {} is valid.\".format(loc))\n if loc_data is None:\n logging.error(\"Invalid location value supplied ({}). 
Please check config.yaml file.\".format(loc))\n raise ValueError\n logging.info(\"The location {} is valid.\".format(loc))\n \n city = get_city(loc)\n lat = loc_data.latitude\n lon = loc_data.longitude\n \n # Retrieving elevation data for the location.\n elev = get_elevation_data(lat, lon)\n \n for month in range(1, 13):\n \n logging.info(\"Retrieving {} weather data for month {}.\".format(loc, month))\n \n for sample in range(config_data[\"gis\"][\"sampling_number\"]):\n \n temp_min = None\n temp_max = None\n humidity = None\n pressure = None\n \n while temp_min is None or temp_max is None or humidity is None or pressure is None:\n \n year = random.randint(config_data[\"gis\"][\"year_start\"], config_data[\"gis\"][\"year_end\"])\n\n _, last_day = calendar.monthrange(year, month)\n\n datetime_start = datetime.datetime(year, month, 1)\n datetime_end = datetime.datetime(year, month, last_day)\n\n date_gen = fake.date_time_between_dates(datetime_start=datetime_start\n ,datetime_end=datetime_end)\n\n forecast = forecastio.load_forecast(config_data[\"forecastio_api_key\"]\n ,lat\n ,lon\n ,time=date_gen\n ,units=\"si\")\n\n historical_data = forecast.json[\"daily\"][\"data\"][0]\n \n timezone = forecast.json.get(\"timezone\", None)\n temp_min = historical_data.get(\"temperatureMin\", None)\n temp_max = historical_data.get(\"temperatureMax\", None)\n humidity = historical_data.get(\"humidity\", None) * 100\n pressure = historical_data.get(\"pressure\", None)\n \n df_temp_hist = pd.Series(dict(zip(df_hist.columns\n ,[city, date_gen\n ,date_gen.month, temp_min\n ,temp_max, humidity\n ,pressure])))\n \n df_hist = df_hist.append(df_temp_hist, ignore_index=True)\n \n df_temp_ref = pd.Series(dict(zip(df_ref.columns\n ,[city, lat\n ,lon, elev\n ,timezone])))\n df_ref = df_ref.append(df_temp_ref, ignore_index=True)\n \n logging.info(\"Generating position to consolidate latitude, longitude and elevation data\")\n df_pos = df_ref[[\"Latitude\", \"Longitude\", \"Elevation\"]].round(2)\n df_pos[\"Elevation\"] = df_pos[\"Elevation\"].astype(int) \n df_ref[\"Position\"] = df_pos.astype(str).apply(lambda x: \",\".join(x), axis=1)\n \n logging.info(\"Saving baseline reference data.\")\n df_ref.to_csv(get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n ,file_name=config_data[\"gis\"][\"output_base_reference_file_name\"])\n ,index=False)\n logging.info(\"Completed saving baseline reference data.\")\n\n logging.info(\"Saving baseline historical data.\")\n df_hist.to_csv(get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n ,file_name=config_data[\"gis\"][\"output_base_historical_file_name\"])\n ,index=False)\n logging.info(\"Completed saving baseline historical data.\")", "def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()", "def main():\n database = dbdir + \"pyflangun.db\"\n sql_create_airports_table = \"\"\"CREATE TABLE IF NOT EXISTS airports (\n id integer PRIMARY KEY,\n name text NOT NULL,\n icao text\n );\"\"\"\n sql_create_weather_table = \"\"\"CREATE TABLE IF NOT EXISTS weather (\n id integer PRIMARY KEY,\n icao text NOT NULL,\n temp_f integer NOT NULL,\n dew_pt_f integer NOT NULL,\n status integer NOT NULL,\n FOREIGN KEY (icao) REFERENCES airports (icao)\n );\"\"\"\n # create a database connection\n conn = create_connection(database)\n # create tables\n if conn is not None:\n create_table(conn, sql_create_airports_table)\n 
create_table(conn, sql_create_weather_table)\n else:\n print(\"Error! cannot create the database connection.\")", "def main():\n # Create the flow\n with Flow('pickle flow') as flow:\n db_table = create_table()\n weather_data = get_weather(LAT_NYC, LON_NYC, 2018)\n parsed_data = parse_weather(weather_data)\n populated_table = store_weather(parsed_data)\n populated_table.set_upstream(db_table)\n\n # Run the flow\n flow.run()", "def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m", "def store_data_in_db(sensors):\n connection = sqlite3.connect('sensordata.db')\n cursor = connection.cursor()\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS sensordata (\n ID int, NAME text, VALUE real, UNIT text, TIME text )\"\"\"\n )\n with connection:\n for sensor in sensors:\n cursor.execute(\"INSERT INTO sensordata VALUES (:id, :name, :value, :unit, :time)\",\n {'id': sensor['id'], 'name': sensor['name'],\n 'value': sensor['value'], 'unit': sensor['unit'], 'time': sensor['datetime']})", "def collect(self, start_date=None, end_date=None):\n if start_date is None:\n start_date = self.default_start\n if end_date is None:\n end_date = self.default_end\n\n cur = self.conn.cursor()\n\n # Maximum return is 1000 entries\n num_days = 1000 // len(self.stations)\n # Maximum date-range is 1 year\n if num_days > 365:\n num_days = 365\n\n for interval in netzero.util.time_intervals(\n start_date, end_date, days=num_days\n ):\n netzero.util.print_status(\n \"Weather\",\n \"Collecting: {} to {}\".format(\n interval[0].strftime(\"%Y-%m-%d\"), interval[1].strftime(\"%Y-%m-%d\")\n ),\n )\n\n # TODO -- REMOVE ASSUMPTION THAT LEN(DATA) < LIMIT\n raw_data = self.query_api(interval[0], interval[1])\n\n if raw_data is None:\n print(\"ERROR QUERYING API\") # TODO exception here?\n continue\n\n for entry in raw_data.get(\"results\", []):\n # Insert the weather data to the table, to be averaged later\n date = datetime.datetime.strptime(\n entry[\"date\"], \"%Y-%m-%dT%H:%M:%S\"\n ).date()\n value = entry[\"value\"]\n station = entry[\"station\"]\n\n cur.execute(\n \"INSERT OR IGNORE INTO weather VALUES (?, ?, ?)\", (date, value, station)\n )\n\n self.conn.commit()\n\n cur.close()\n\n netzero.util.print_status(\"Weather\", \"Complete\", newline=True)", "def create():\n\tcreate_db()", "def generate_polynesian_weather_data():\n weather_path = os.path.dirname(os.path.realpath(__file__))\n low_fp = weather_path + \"/polynesia_weather/low/1976/\"\n med_fp = weather_path + \"/polynesia_weather/med/1985/\"\n high_fp = weather_path + \"/polynesia_weather/high/1982/\"\n low_name = \"polynesia_1976\"\n med_name = \"polynesia_1985\"\n high_name = \"polynesia_1982\"\n 
generate_year_weather_data(low_fp, low_name)\n generate_year_weather_data(med_fp, med_name)\n generate_year_weather_data(high_fp, high_name)", "def create_database():\n with connection:\n connection.execute(CREATE_MOVIE_TABLE)\n connection.execute(CREATE_USER_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)", "def _get_data(self):\n with open(self.filename, 'r') as fid:\n # we are not interested in the first line\n fid.readline()\n # second line\n line = fid.readline().strip()\n # the temperature is written in milli-degrees in the form\n # t=23456, but preceeded by a large HEX data dump in the form\n # 2c 00 4b 46 ff ff 0e 10 17 t=21875\n index = line.find('t=') + 2\n temperature = int(line[index:index + 6]) / 1e3\n time_now = self.get_timestamp()\n\n logging.debug(\n 'w1_temp: {0}, datetime: {1}, logger_id: {2}'.format(\n temperature,\n time_now,\n self.logger_id))\n\n ins = self.table(value=temperature,\n logger_id=self.logger_id,\n datetime=time_now)\n\n self.session.add(ins)\n self.session.commit()", "def _create_petition_(self):\n self.__weather = create(self.__latitude, self.__longitude)", "def create_temporal_database(dbif):\n global tgis_backend\n global tgis_version\n global tgis_db_version\n global tgis_database_string\n\n template_path = get_sql_template_path()\n msgr = get_tgis_message_interface()\n\n # Read all SQL scripts and templates\n map_tables_template_sql = open(os.path.join(\n template_path, \"map_tables_template.sql\"), 'r').read()\n raster_metadata_sql = open(os.path.join(\n get_sql_template_path(), \"raster_metadata_table.sql\"), 'r').read()\n raster3d_metadata_sql = open(os.path.join(template_path,\n \"raster3d_metadata_table.sql\"),\n 'r').read()\n vector_metadata_sql = open(os.path.join(template_path,\n \"vector_metadata_table.sql\"),\n 'r').read()\n raster_views_sql = open(os.path.join(template_path, \"raster_views.sql\"),\n 'r').read()\n raster3d_views_sql = open(os.path.join(template_path,\n \"raster3d_views.sql\"), 'r').read()\n vector_views_sql = open(os.path.join(template_path, \"vector_views.sql\"),\n 'r').read()\n\n stds_tables_template_sql = open(os.path.join(template_path,\n \"stds_tables_template.sql\"),\n 'r').read()\n strds_metadata_sql = open(os.path.join(template_path,\n \"strds_metadata_table.sql\"),\n 'r').read()\n str3ds_metadata_sql = open(os.path.join(template_path,\n \"str3ds_metadata_table.sql\"),\n 'r').read()\n stvds_metadata_sql = open(os.path.join(template_path,\n \"stvds_metadata_table.sql\"),\n 'r').read()\n strds_views_sql = open(os.path.join(template_path, \"strds_views.sql\"),\n 'r').read()\n str3ds_views_sql = open(os.path.join(template_path, \"str3ds_views.sql\"),\n 'r').read()\n stvds_views_sql = open(os.path.join(template_path, \"stvds_views.sql\"),\n 'r').read()\n\n # Create the raster, raster3d and vector tables SQL statements\n raster_tables_sql = map_tables_template_sql.replace(\"GRASS_MAP\", \"raster\")\n vector_tables_sql = map_tables_template_sql.replace(\"GRASS_MAP\", \"vector\")\n raster3d_tables_sql = map_tables_template_sql.replace(\n \"GRASS_MAP\", \"raster3d\")\n\n # Create the space-time raster, raster3d and vector dataset tables\n # SQL statements\n strds_tables_sql = stds_tables_template_sql.replace(\"STDS\", \"strds\")\n stvds_tables_sql = stds_tables_template_sql.replace(\"STDS\", \"stvds\")\n str3ds_tables_sql = stds_tables_template_sql.replace(\"STDS\", \"str3ds\")\n\n msgr.message(_(\"Creating temporal database: %s\" % (str(tgis_database_string))))\n\n if tgis_backend == \"sqlite\":\n # We need to create 
the sqlite3 database path if it does not exist\n tgis_dir = os.path.dirname(tgis_database_string)\n if not os.path.exists(tgis_dir):\n try:\n os.makedirs(tgis_dir)\n except Exception as e:\n msgr.fatal(_(\"Unable to create SQLite temporal database\\n\"\n \"Exception: %s\\nPlease use t.connect to set a \"\n \"read- and writable temporal database path\" % (e)))\n\n # Set up the trigger that takes care of\n # the correct deletion of entries across the different tables\n delete_trigger_sql = open(os.path.join(template_path,\n \"sqlite3_delete_trigger.sql\"),\n 'r').read()\n indexes_sql = open(os.path.join(template_path, \"sqlite3_indexes.sql\"),\n 'r').read()\n else:\n # Set up the trigger that takes care of\n # the correct deletion of entries across the different tables\n delete_trigger_sql = open(os.path.join(template_path,\n \"postgresql_delete_trigger.sql\"),\n 'r').read()\n indexes_sql = open(os.path.join(template_path,\n \"postgresql_indexes.sql\"), 'r').read()\n\n # Connect now to the database\n if dbif.connected is not True:\n dbif.connect()\n\n # Execute the SQL statements for sqlite\n # Create the global tables for the native grass datatypes\n dbif.execute_transaction(raster_tables_sql)\n dbif.execute_transaction(raster_metadata_sql)\n dbif.execute_transaction(raster_views_sql)\n dbif.execute_transaction(vector_tables_sql)\n dbif.execute_transaction(vector_metadata_sql)\n dbif.execute_transaction(vector_views_sql)\n dbif.execute_transaction(raster3d_tables_sql)\n dbif.execute_transaction(raster3d_metadata_sql)\n dbif.execute_transaction(raster3d_views_sql)\n # Create the tables for the new space-time datatypes\n dbif.execute_transaction(strds_tables_sql)\n dbif.execute_transaction(strds_metadata_sql)\n dbif.execute_transaction(strds_views_sql)\n dbif.execute_transaction(stvds_tables_sql)\n dbif.execute_transaction(stvds_metadata_sql)\n dbif.execute_transaction(stvds_views_sql)\n dbif.execute_transaction(str3ds_tables_sql)\n dbif.execute_transaction(str3ds_metadata_sql)\n dbif.execute_transaction(str3ds_views_sql)\n\n # The delete trigger\n dbif.execute_transaction(delete_trigger_sql)\n # The indexes\n dbif.execute_transaction(indexes_sql)\n\n # Create the tgis metadata table to store the database\n # initial configuration\n # The metadata table content\n metadata = {}\n metadata[\"tgis_version\"] = tgis_version\n metadata[\"tgis_db_version\"] = tgis_db_version\n metadata[\"creation_time\"] = datetime.today()\n _create_tgis_metadata_table(metadata, dbif)\n\n dbif.close()", "def init_beeswax_db(cls):\n global _INITIALIZED\n if _INITIALIZED:\n return\n\n make_query(cls.client, 'CREATE DATABASE IF NOT EXISTS %(db)s' % {'db': cls.db_name}, wait=True)\n make_query(cls.client, 'CREATE DATABASE IF NOT EXISTS %(db)s_other' % {'db': cls.db_name}, wait=True)\n\n if cls.load_data:\n\n data_file = cls.cluster.fs_prefix + u'/beeswax/sample_data_échantillon_%d.tsv'\n\n # Create a \"test_partitions\" table.\n CREATE_PARTITIONED_TABLE = \"\"\"\n CREATE TABLE `%(db)s`.`test_partitions` (foo INT, bar STRING)\n PARTITIONED BY (baz STRING, boom INT)\n ROW FORMAT DELIMITED\n FIELDS TERMINATED BY '\\t'\n LINES TERMINATED BY '\\n'\n \"\"\" % {'db': cls.db_name}\n make_query(cls.client, CREATE_PARTITIONED_TABLE, wait=True)\n cls._make_data_file(data_file % 1)\n\n LOAD_DATA = \"\"\"\n LOAD DATA INPATH '%(data_file)s'\n OVERWRITE INTO TABLE `%(db)s`.`test_partitions`\n PARTITION (baz='baz_one', boom=12345)\n \"\"\" % {'db': cls.db_name, 'data_file': data_file % 1}\n make_query(cls.client, LOAD_DATA, wait=True, 
local=False)\n\n # Insert additional partition data into \"test_partitions\" table\n ADD_PARTITION = \"\"\"\n ALTER TABLE `%(db)s`.`test_partitions` ADD PARTITION(baz='baz_foo', boom=67890) LOCATION '%(fs_prefix)s/baz_foo/boom_bar'\n \"\"\" % {'db': cls.db_name, 'fs_prefix': cls.cluster.fs_prefix}\n make_query(cls.client, ADD_PARTITION, wait=True, local=False)\n\n # Create a bunch of other tables\n CREATE_TABLE = \"\"\"\n CREATE TABLE `%(db)s`.`%(name)s` (foo INT, bar STRING)\n COMMENT \"%(comment)s\"\n ROW FORMAT DELIMITED\n FIELDS TERMINATED BY '\\t'\n LINES TERMINATED BY '\\n'\n \"\"\"\n\n # Create a \"test\" table.\n table_info = {'db': cls.db_name, 'name': 'test', 'comment': 'Test table'}\n cls._make_data_file(data_file % 2)\n cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 2)\n\n if is_live_cluster():\n LOG.warning('HUE-2884: We cannot create Hive UTF8 tables when live cluster testing at the moment')\n else:\n # Create a \"test_utf8\" table.\n table_info = {'db': cls.db_name, 'name': 'test_utf8', 'comment': cls.get_i18n_table_comment()}\n cls._make_i18n_data_file(data_file % 3, 'utf-8')\n cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 3)\n\n # Create a \"test_latin1\" table.\n table_info = {'db': cls.db_name, 'name': 'test_latin1', 'comment': cls.get_i18n_table_comment()}\n cls._make_i18n_data_file(data_file % 4, 'latin1')\n cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 4)\n\n # Create a \"myview\" view.\n make_query(cls.client, \"CREATE VIEW `%(db)s`.`myview` (foo, bar) as SELECT * FROM `%(db)s`.`test`\" % {'db': cls.db_name}, wait=True)\n\n _INITIALIZED = True", "def dbUpdate():\n dbAddress = config.get('database', 'dbAddress')\n dbUser = config.get('database', 'dbUser')\n dbPassword = config.get('database', 'dbPassword')\n dbName = config.get('database', 'dbName')\n dbPort = config.getint('database', 'dbPort')\n con = MySQLdb.connect(host=dbAddress, port=dbPort, user=dbUser, passwd=dbPassword,\n db=dbName)\n c = con.cursor()\n\n date = datetime.datetime.now()\n c.execute(\"INSERT INTO sensor_data (date, dht_temp, dht_humidity, cpu_temp, \"\n \"solar_voltage, solar_current, battery_voltage, battery_current, \"\n \"load_voltage, load_current) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,\"\n \"%s)\",\n (date, dht_temp, dht_humidity, cpu_temp, sol_volt_v, sol_curr_ma,\n bat_volt_v, bat_curr_ma, load_volt_v, load_curr_ma))\n\n con.commit()\n con.close()", "def create_db():\n database.db.create_all()\n get_ulm()\n for fixture_file in glob.glob(config.DevelopmentConfig.FIXTURES_DIRS + '/*.json'):\n fixtures = JSONLoader().load(fixture_file)\n load_fixtures(database.db, fixtures)\n MigrationManager().stamp_db()", "def to_WTH_converter(self, weather_data, dest_dir):\n ds_all = weather_data.get_global_dataset()\n if self.country is None:\n print(\"Country given is erroneous:\")\n return\n elif self.country == \"globe\":\n lon_num_start = 0\n lon_num_stop = weather_data.get_num_of_attribute('longitude')\n lat_num_start = 0\n lat_num_stop = weather_data.get_num_of_attribute('latitude')\n else:\n lon_num_start, lon_num_stop, lat_num_start, lat_num_stop = weather_data.get_country_boundary(self.country)\n\n\n # top bottom, left to right\n lon_num_start = 397\n lat_num_start = 184\n for lon_i in range(lon_num_start, lon_num_stop + 1):\n # for lon_i in range(lon_num_start, lon_num_stop+1):\n lon = ds_all.longitude.isel(longitude=lon_i).values.tolist()\n\n for lat_i in range(lat_num_start, lat_num_stop+1):\n # for lat_i in 
range(lat_num_start, lat_num_stop + 1):\n lat = ds_all.latitude.isel(latitude=lat_i).values.tolist()\n\n # create a dynamic header with updated LON, LAT info and move it into the folder given\n wth_header_u = ut.format_header(lat_i + 1, lon_i + 1, lat, lon)\n wth_header = dest_dir + \"/\" + wth_header_u\n shutil.move(wth_header_u, wth_header)\n\n # open in appending mode\n fwth = open(wth_header, \"a+\")\n\n # loop through daily weather data\n for t, date in enumerate(self.years):\n daily_data_vars = ut.get_daily_data_vars(ds_all, lat_i, lon_i, t)\n # disregard all NAN values\n if daily_data_vars is None:\n fwth.close()\n os.remove(wth_header)\n break\n\n if t == 0:\n ut.update_table(wth_header_u, lat, lon)\n\n entry = ut.format_data_vars_entry(daily_data_vars, date)\n\n # append this entry into the file\n fwth.write(entry)\n print(\"Added entry:\", entry)\n\n # close file after writing\n fwth.close()\n print(\"Output WTH:\", wth_header)", "def __upload_data(self):\n data_path = \"database\"\n os.makedirs(data_path, exist_ok=True)\n try:\n conn = sqlite3.connect('database/customers.db')\n query = '''CREATE TABLE IF NOT EXISTS all_customers_database (\n first_name TEXT, second_name TEXT,\n gender TEXT, account_type TEXT, account_number INTEGER PRIMARY KEY UNIQUE NOT NULL,\n account_password VARCHAR, account_balance REAL );'''\n #Create table\n cursor = conn.cursor()\n print(\"Connection sucessful\")\n cursor.execute(query)\n conn.commit()\n print(\"Table created\")\n #Insert a row to a database\n insert_query ='''INSERT INTO all_customers_database\n (first_name, second_name, gender, account_type, account_number, account_password, account_balance)\n VALUES \n (?, ?, ?, ?, ?, ?, ?);'''\n conn.execute(insert_query, (self.first_name, self.second_name, self.gender, self.account_type, self.account_number, self.account_password, self.account_balance))\n print(\"Your details saved successfully.\")\n except sqlite3.Error as err:\n # print(\"Error while creating a sqlite table \", err)\n print(\"Error creating database\")\n finally:\n if conn:\n conn.close()\n # print(\"Sqlite connection closed.\")", "def checkFirstRun():\n my_cfg = Path.home() / \".bandaid\" / \"bandaid.cfg\"\n my_db = Path.home() / \".bandaid\" / \"bandaid.db\"\n if Path(f'{Path.home()}/.bandaid/').exists():\n return my_db\n print(\"First run, making the donuts...\")\n Path.mkdir(Path.home() / \".bandaid\", exist_ok=True)\n zipcode = inputZip()\n lat, lng = getLatLng(zipcode)\n with open(my_cfg, \"w+\") as f:\n f.write('DBPATH=~/.bandaid/bandaid.db\\n')\n f.write(f'ZIPCODE={zipcode}')\n user = (lambda: environ[\"USERNAME\"]\n if \"C:\" in getcwd() else environ[\"USER\"])()\n initDB(my_db, zipcode, user, lat, lng)\n print(f\"Database and config file created at {my_cfg}\")\n return my_db", "def create_db(self):", "def add_water():\n\n user_id = session['user_id']\n drink = request.form.get('drink')\n postal = request.form.get('postal')\n time_updated = datetime.now()\n new_drink = Water(ounces=drink, user_id=user_id, time_updated=time_updated, postal=postal)\n\n db.session.add(new_drink)\n db.session.commit()\n \n time_zone = session[\"user_timezone\"]\n \n current_time = datetime.now().astimezone(pytz.timezone(time_zone))\n\n current_date = current_time.date()\n\n total_water_today = db.session.query(func.sum(Water.ounces)).filter(Water.user_id==user_id, Water.time_updated >= current_date).scalar()\n\n if int(total_water_today) != None or int(total_water_today) != 0:\n total_cups_today = round((total_water_today/8),2)\n else:\n 
total_water_today = 0\n total_cups_today = 0\n\n print('user id', user_id)\n print('current date', current_date)\n return f\"Today's Water: {total_water_today} Oz ({total_cups_today} Cups)\"", "def init():\n database.create_tables([Tracker])\n database.commit()", "def initialize_sqlite_database(sel_wormbase_version,\n strain_only=False):\n start = arrow.utcnow()\n console.log(\"Initializing Database\")\n\n SQLITE_PATH = f\"base/cendr.{DATASET_RELEASE}.{sel_wormbase_version}.db\"\n SQLITE_BASENAME = os.path.basename(SQLITE_PATH)\n\n # Download wormbase files\n if strain_only is False:\n if os.path.exists(SQLITE_PATH):\n os.remove(SQLITE_PATH)\n\n if not os.path.exists(DOWNLOAD_PATH):\n os.makedirs(DOWNLOAD_PATH)\n\n # Parallel URL download\n console.log(\"Downloading Wormbase Data\")\n download([URLS.GENE_GFF_URL,\n URLS.GENE_GTF_URL,\n URLS.GENE_IDS_URL,\n URLS.HOMOLOGENE_URL,\n URLS.ORTHOLOG_URL,\n URLS.TAXON_ID_URL],\n DOWNLOAD_PATH)\n\n gff_fname = download_fname(DOWNLOAD_PATH, URLS.GENE_GFF_URL)\n gtf_fname = download_fname(DOWNLOAD_PATH, URLS.GENE_GTF_URL)\n gene_ids_fname = download_fname(DOWNLOAD_PATH, URLS.GENE_IDS_URL)\n homologene_fname = download_fname(DOWNLOAD_PATH, URLS.HOMOLOGENE_URL)\n ortholog_fname = download_fname(DOWNLOAD_PATH, URLS.ORTHOLOG_URL)\n\n from base.application import create_app\n app = create_app()\n app.config['SQLALCHEMY_DATABASE_URI'] = f\"sqlite:///{SQLITE_BASENAME}\"\n app.app_context().push()\n\n if strain_only is True:\n db.metadata.drop_all(bind=db.engine, checkfirst=True, tables=[Strain.__table__])\n db.metadata.create_all(bind=db.engine, tables=[Strain.__table__])\n else:\n db.create_all(app=app)\n db.session.commit()\n\n console.log(f\"Created {SQLITE_PATH}\")\n\n ################\n # Load Strains #\n ################\n console.log('Loading strains...')\n db.session.bulk_insert_mappings(Strain, fetch_andersen_strains())\n db.session.commit()\n console.log(f\"Inserted {Strain.query.count()} strains\")\n\n if strain_only is True:\n console.log('Finished loading strains')\n return\n\n ################\n # Set metadata #\n ################\n console.log('Inserting metadata')\n metadata = {}\n metadata.update(vars(constants))\n metadata.update({\"CENDR_VERSION\": CENDR_VERSION,\n \"APP_CONFIG\": APP_CONFIG,\n \"DATASET_RELEASE\": DATASET_RELEASE,\n \"WORMBASE_VERSION\": sel_wormbase_version,\n \"RELEASES\": RELEASES,\n \"DATE\": arrow.utcnow()})\n for k, v in metadata.items():\n if not k.startswith(\"_\"):\n # For nested constants:\n if type(v) == type:\n for name in [x for x in dir(v) if not x.startswith(\"_\")]:\n key_val = Metadata(key=\"{}/{}\".format(k, name),\n value=getattr(v, name))\n db.session.add(key_val)\n else:\n key_val = Metadata(key=k, value=str(v))\n db.session.add(key_val)\n\n db.session.commit()\n\n ##############\n # Load Genes #\n ##############\n console.log('Loading summary gene table')\n genes = fetch_gene_gff_summary(gff_fname)\n db.session.bulk_insert_mappings(WormbaseGeneSummary, genes)\n db.session.commit()\n\n console.log('Loading gene table')\n db.session.bulk_insert_mappings(WormbaseGene, fetch_gene_gtf(gtf_fname, gene_ids_fname))\n gene_summary = db.session.query(WormbaseGene.feature,\n db.func.count(WormbaseGene.feature)) \\\n .group_by(WormbaseGene.feature) \\\n .all()\n gene_summary = '\\n'.join([f\"{k}: {v}\" for k, v in gene_summary])\n console.log(f\"============\\nGene Summary\\n------------\\n{gene_summary}\\n============\")\n\n ###############################\n # Load homologs and orthologs #\n 
###############################\n console.log('Loading homologs from homologene')\n db.session.bulk_insert_mappings(Homologs, fetch_homologene(homologene_fname))\n db.session.commit()\n\n console.log('Loading orthologs from WormBase')\n db.session.bulk_insert_mappings(Homologs, fetch_orthologs(ortholog_fname))\n db.session.commit()\n\n #############\n # Upload DB #\n #############\n\n # Upload the file using todays date for archiving purposes\n console.log(f\"Uploading Database ({SQLITE_BASENAME})\")\n upload_file(f\"db/{SQLITE_BASENAME}\", SQLITE_PATH)\n\n diff = int((arrow.utcnow() - start).total_seconds())\n console.log(f\"{diff} seconds\")\n\n # =========================== #\n # Generate gene id dict #\n # =========================== #\n # Create a gene dictionary to match wormbase IDs to either the locus name\n # or a sequence id\n gene_dict = {x.gene_id: x.locus or x.sequence_name for x in WormbaseGeneSummary.query.all()}\n pickle.dump(gene_dict, open(\"base/static/data/gene_dict.pkl\", 'wb'))", "def initialization():\r\n\r\n logging.debug('initialization()')\r\n\r\n dir_path = os.path.join(os.environ['LOCALAPPDATA'],'WarietyWallpaperImages')\r\n os.makedirs(dir_path, exist_ok=True)\r\n db_file = os.path.join(dir_path,'wariety.db')\r\n conn = sqlite3.connect(db_file)\r\n c = conn.cursor()\r\n\r\n # Create tables\r\n c.execute(\"\"\"\r\n CREATE TABLE IF NOT EXISTS wallpapers (\r\n id integer primary key,\r\n iurl text unique,\r\n iname text,\r\n ipath text,\r\n isource text)\r\n \"\"\")\r\n\r\n conn.close()", "def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()", "def perform_watering_update(self, plant):\n try:\n with self.connection.cursor() as cursor:\n sql = \"INSERT INTO test.watering_data\\\n (balance_id, logdate, start_weight, end_weight, status, experiment_id)\\\n VALUES({0}, NOW(), {1}, {2}, {3}, '{4}')\".format(plant.get_balance(),\n plant.get_start_weight(),\n plant.get_end_weight(),\n plant.get_status(),\n plant.get_experiment_id())\n\n # Execute our statement\n if is_statement_safe(sql):\n cursor.execute(sql)\n self.connection.commit()\n else:\n print(\"DANGEROUS STATEMENT\")\n sys.exit()\n except (pymysql.err.DatabaseError,\n pymysql.err.IntegrityError,\n pymysql.err.MySQLError):\n sys.stderr.write(\n \"Most likely an IntegrityError error due to duplicate entry\")\n return 2", "def CreateDB(self) :\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateClassTable'])\r\n\t\t\tfor ii,classname in enumerate(self.SQLCMDs['ClassesList']) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['InsertClass'],(ii,classname))\r\n\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateSetTable'])\r\n\t\t\tfor ii,setname in enumerate(self.SQLCMDs['SetList']) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['InsertSet'],(ii,setname))\r\n\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateSampleTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateDictListTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateDictBuildTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateWordLists'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateFeatureTable'])\r\n\t\t\tself.DB_Connect.commit()\r\n\t\texcept Exception as detail:\r\n\t\t\tlogging.error(\"Failed to create the database: 
%s\"%detail)\r\n\t\t\tself.DB_Connect.rollback()\r\n\t\treturn", "def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)", "def main():\n\n database_file = Path(__file__).parent.parent / \"src\" / \"galois\" / \"_databases\" / \"irreducible_polys.db\"\n conn, cursor = create_database(database_file)\n\n _add_hpl_1998(conn, cursor)\n\n conn.close()", "def create_db(self):\n return None", "def init(self):\n self.db.connect()\n try:\n self.db.create_tables([JambiModel], safe=True)\n JambiModel.create(ref='0')\n self.logger.info('Database initialized')\n except IntegrityError:\n self.logger.info('Database was already initialized')\n self.db.close()", "def getTodaysWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tweather = {} \n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\t\t\n\t\t# Getting todays weather data and populating the dictionary\n\t\tif fio.has_daily() is True and fio.has_hourly() is True:\n\t\t daily = FIODaily.FIODaily(fio)\n\t\t hourly = FIOHourly.FIOHourly(fio)\n\t\t for day in xrange(0, 1):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"windSpeed\":\n\t\t\t\t\t\twindSpeed = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"windBearing\":\n\t\t\t\t\t\twindBearing = unicode(daily.get_day(day)[item])\n\t\t\t\t\t\twindBearing = self.helper.convertWindBearing(windBearing)\n\t\t\t\t\tif item == \"sunsetTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"sunriseTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"wind\"] = windBearing + \" \" + windSpeed + \" mph\"\n\t\t\t\tfor item in hourly.get_hour(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[\"current\"] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"temperature\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"town\"] = self.helper.getCoords(keyword)[2]\n\t\telse:\n\t\t\treturn 'No Todays data'\n\n\t\treturn weather", "def create_tables(cxn):\n\tcursor = cxn.cursor()\n\tcursor.execute(\"DROP TABLE IF EXISTS WEATHER\")\n\tcursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS WEATHER(\n\t\tstate varchar(3),\n\t\tcity varchar (15),\n\t\tobs_date varchar(12),\n\t\thour int,\n\t\tminute int,\n\t\tcurr_temp float,\n\t\tunique(state, city, obs_date, hour, minute)\n\t\t)\"\"\")\n\tcursor.close()", "def ready(self):\n try:\n from workouts.models import ExerciseType\n from workouts import EXERCISE_TYPE_ROUNDS, EXERCISE_TYPE_TIME\n exercise_type_time = ExerciseType.objects.get_or_create(type_name=EXERCISE_TYPE_TIME)\n exercise_type_rounds = ExerciseType.objects.get_or_create(type_name=EXERCISE_TYPE_ROUNDS)\n except Exception:\n pass", "def setup_database(self, config_dict):\n\n # This will 
create the database if it doesn't exist, then return an\n # opened instance of the database manager. \n dbmanager = self.engine.db_binder.get_manager(self.data_binding, initialize=True)\n syslog.syslog(syslog.LOG_INFO, \"engine: Using binding '%s' to database '%s'\" % (self.data_binding, dbmanager.database_name))\n \n # Back fill the daily summaries.\n syslog.syslog(syslog.LOG_INFO, \"engine: Starting backfill of daily summaries\")\n t1 = time.time()\n nrecs, ndays = dbmanager.backfill_day_summary()\n tdiff = time.time() - t1\n if nrecs:\n syslog.syslog(syslog.LOG_INFO, \n \"engine: Processed %d records to backfill %d day summaries in %.2f seconds\" % (nrecs, ndays, tdiff))\n else:\n syslog.syslog(syslog.LOG_INFO,\n \"engine: Daily summaries up to date.\")", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def set_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tweets(\n id INTEGER PRIMARY KEY,\n tweet_id INTEGER,\n insert_date TEXT,\n created_at TEXT,\n hashtag TEXT)\n \"\"\")\n\n conn.execute(\"\"\"CREATE TABLE tweet_peaks(\n peak_datetime TEXT NOT NULL,\n hashtag TEXT NOT NULL,\n time_frame TEXT,\n mean REAL,\n std REAL,\n sensibility REAL,\n freq_limit REAL,\n qt_tweets INTEGER,\n id TEXT PRIMARY KEY,\n probability REAL);\n \"\"\")", "def populate_stops(self):\n stops = self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)", "def insert_db():\n populate_tables()", "def insert_data():\n\tBase.metadata.drop_all(engine)\n\tBase.metadata.create_all(engine)\n\tu1 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tu2 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tc = insert_catalog(u1.id, \"Sichuan Dish\")\n\tinsert_catalog(u1.id, \"Fujian Dish\")\n\tinsert_catalog(u1.id, \"Guangdong Dish\")\n\tinsert_catalog(u2.id, \"Zhejiang Dish\")\n\tinsert_catalog(u2.id, \"Beijing Dish\")\n\tinsert_item(u1.id, \"Iphone 6 plus\", c, 'Is a phone', None)\n\tinsert_item(u1.id, \"Hot pot\", c, \"Hot hot hot\", None)\n\tinsert_item(u2.id, \"Kong Bao Chicken\", c, \"Classic\", None)", "def _create_pysam_wfile(self, resource, meta):\n # pylint: disable=attribute-defined-outside-init,consider-using-with\n self._temp_dir = TemporaryDirectory()\n fname = os.path.join(self._temp_dir.name, 'weather.csv')\n logger.debug('Creating PySAM weather data file: {}'.format(fname))\n\n # ------- Process metadata\n m = pd.DataFrame(meta).T\n timezone = m['timezone']\n m['Source'] = 'NSRDB'\n m['Location ID'] = meta.name\n m['City'] = '-'\n m['State'] = m['state'].apply(lambda x: '-' if x == 'None' else x)\n m['Country'] = m['country'].apply(lambda x: '-' if x == 'None' else x)\n m['Latitude'] = m['latitude']\n m['Longitude'] = m['longitude']\n m['Time Zone'] = timezone\n m['Elevation'] = m['elevation']\n m['Local Time Zone'] = timezone\n m['Dew Point Units'] = 'c'\n m['DHI Units'] = 'w/m2'\n m['DNI Units'] = 'w/m2'\n m['Temperature Units'] = 'c'\n m['Pressure Units'] = 'mbar'\n m['Wind Speed'] = 'm/s'\n keep_cols = [c for c in m.columns if c not in self.WF_META_DROP_COLS]\n m[keep_cols].to_csv(fname, index=False, mode='w')\n\n # --------- Process data\n var_map = {'dni': 'DNI',\n 'dhi': 'DHI',\n 'wind_speed': 'Wind Speed',\n 'air_temperature': 
'Temperature',\n 'dew_point': 'Dew Point',\n 'surface_pressure': 'Pressure',\n }\n resource = resource.rename(mapper=var_map, axis='columns')\n\n time_index = resource.index\n # Adjust from UTC to local time\n local = np.roll(resource.values, int(timezone * self.time_interval),\n axis=0)\n resource = pd.DataFrame(local, columns=resource.columns,\n index=time_index)\n mask = (time_index.month == 2) & (time_index.day == 29)\n time_index = time_index[~mask]\n\n df = pd.DataFrame(index=time_index)\n df['Year'] = time_index.year\n df['Month'] = time_index.month\n df['Day'] = time_index.day\n df['Hour'] = time_index.hour\n df['Minute'] = time_index.minute\n df = df.join(resource.loc[~mask])\n\n df.to_csv(fname, index=False, mode='a')\n\n return fname", "def init():\n\n # delete existing file\n if os.path.exists(DBFILE):\n os.remove(DBFILE)\n\n db = sqlite3.connect(DBFILE)\n # create tables\n create(db, PARAGRAPH, \"paragraph\")\n create(db, QUESTION, \"question\")\n create(db, ANSWER, \"answer\")\n\n return db", "def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()", "def write_to_db(batch_num,pH,cursor):\n\n\t#Batch Number\n\tbatch = 1\n\t\t\n\n\tcur_time = datetime.now()\n\theater_state = 1\n\ttemp = 30.1\n\tamb_hum, amb_temp = 25.1,30.2\n\n\tcursor.execute('''INSERT INTO Kombucha_Data('Time', 'Batch', 'Heater State', 'Temperature', 'Ambient Temperature', 'Ambient Humidity','pH')\n\t\t\t\t VALUES(?,?,?,?,?,?,NULL)''', (cur_time, batch_num, heater_state, temp, amb_temp, amb_hum))\n\tcursor.commit()\n\treturn", "def create_sqlite_table(self):\n self.print_datetime_output('Connect to data base %s' % self.db_name)\n con = sqlite3.connect(self.db_name)\n cur = con.cursor()\n\n # check if table exists\n cur.execute(\"select count(*) from sqlite_master where type='table' and name='%s'\" % self.db_table)\n if cur.fetchall()[0][0] == 1:\n self.print_datetime_output('Previous table %s was dropped' % self.db_table)\n cur.execute(\"DROP TABLE %s;\" % self.db_table)\n\n self.print_datetime_output('Create table %s and import data from csv file %s' % (self.db_table,\n self.time_series_file_name))\n cur.execute(\"CREATE TABLE %s (timestamp, close_USD);\" % self.db_table)\n\n with open(self.file_name, 'r') as fin:\n dr = csv.DictReader(fin)\n to_db = [(i['timestamp'], i['close (USD)']) for i in dr]\n\n cur.executemany(\"INSERT INTO %s (timestamp, close_USD) VALUES (?, ?);\" % self.db_table, to_db)\n con.commit()\n return con", "def build(self):\n \n # create db with empty tables\n dbpath, config = self.setup()\n \n # avoid work if setup decided db exists and build can be skipped\n if dbpath is None:\n return \n \n # check prerequisite files \n obopath = check_file(config.obo, dbpath, \"obo\")\n refpath = check_file(config.reference_phenotypes, dbpath, \n \"reference_phenotypes\")\n freqpath = check_file(config.phenotype_frequencies, dbpath,\n \"phenotype_frequencies\")\n\n self.logger.msg1(\"Loading ontology\") \n obo = MinimalObo(obopath, True)\n \n 
self.logger.msg1(\"Preparing phenotype frequencies\")\n fill_phenotype_frequency_table(dbpath, freqpath)\n \n # fill database with data\n self.logger.msg1(\"Preparing references\")\n fill_concise_reference_table(dbpath, refpath) \n fill_complete_reference_table(dbpath, obo, config) \n \n self._end()", "def database_conn(self, table, hr, data):\n # origionally from https://www.w3schools.com/python/python_mysql_insert.asp\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=WEATHERUSER,\n password=DBPASS,\n database=\"weather\"\n )\n mycursor = mydb.cursor()\n sql = f\"UPDATE weather.{table} SET hr = {hr}, dt = %s, temp = %s, pressure = %s, humidity = %s, dewpoint = %s, rain = %s where hr = {hr}\"\n mycursor.execute(sql, data)\n mydb.commit()", "def create(self):\n db.create_all()", "def setupDatabases(self):\n param = self.getDefaultDatabaseConnectionParameter()\n db = DatabaseFactory.getDatabase(self.defaultDriver(), {})\n db.createDatabase(param)\n db.connect(param)\n if db.isConnected():\n self.settingsDb = db\n db.createObservations()\n db.createSensors()\n else:\n return False\n # replace by settings validation method later\n return self.check()", "def init_database(self):\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()", "def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados", "def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()", "def gatherData():\n\n # connect to database, set up the tweepy API object, and find the next date to search\n\n cnx = sqlite3.connect(DB_FILE)\n api = generateAPI(wait_on_rate_limit=True, wait_on_rate_limit_notify=True, **CREDENTIALS)\n\n nextdate = findNextDate(cnx, FIRSTDATE)\n year = nextdate[:4]\n\n # attempt to scrape box office data\n\n bodata = getTopMovies(BO_ENDPOINT, nextdate, CNT_MOVIES)\n\n if not bodata.empty:\n bodata.to_sql('boxoffice', ENGINE, if_exists='append', index=False)\n print(\"Box Office Data for [{0}] Written to Database\".format(nextdate))\n else:\n raise BOError(\"Error Scraping/Writing Box Office Data for [{0}]\".format(nextdate))\n\n # attempt to collect tweet data\n\n for movie in bodata.title:\n try:\n tweets = searchMovie(api, movie, nextdate, MAX_TWEETS)\n if not tweets.empty:\n tweets.to_sql('tweets', ENGINE, if_exists='append', index=False)\n print(\"Tweets for [{0}] Written to Database\".format(movie))\n else:\n raise TweetError(\"Error Fetching/Writing Tweets for [{0}]\".format(movie))\n except tweepy.error.TweepError:\n raise TweetError(\"Error Fetching/Writing Tweets for [{0}]\".format(movie))\n\n # attempt to collect movie metadata\n\n for movie in bodata.title:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), year)\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), str(int(year)-1))\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n print(\"Movie: [{0}] Could Not be Found via OMDB\".format(movie))\n\n # commit changes and close DB connection\n\n cnx.commit()\n cnx.close()\n\n print(\"\\nAll Data for {0} Successfully Added to the Database!\\n\".format(nextdate))\n return nextdate", "def create():\n upgrade()\n populate()", "def example_data():\n\n # In case this is run more than once, empty out existing data\n 
User.query.delete()\n Location.query.delete()\n\n # Add sample users and locations\n\n trinity = User(fname='Trinity', email='[email protected]',\n username='questionit', password='l0lagent')\n neo = User(fname='Neo', email='[email protected]',\n username='neo', password='l0lagent')\n\n tacorea = Location(yelp_id='tacorea-san-francisco', name='Tacorea',\n latitude='37.7749', longitude='122.3392',\n address='809 Bush St, San Francisco, CA 94108',\n yelp_url='[email protected]', pic='pic')\n\n db.session.add_all([trinity, neo, tacorea])\n db.session.commit()", "def get_forecast(location_list):\n #Might need to munge location to get a query out of it\n location, human_location = location_list\n date = datetime.datetime.today()\n query = location\n url = \"http://api.wunderground.com/auto/wui/geo/ForecastXML/index.xml?query=%s\" % query\n f = urllib2.urlopen(url)\n xml = f.read()\n root = ET.XML(xml)\n \n forecast = {'location': location, 'human_location': human_location}\n #Find forecast\n simple = root.find('simpleforecast')\n for day in simple.findall('forecastday'):\n forecast['forecast_date'] = parser.parse(day.find('date').find('pretty').text)\n forecast['high_temp'] = day.find('high').find('fahrenheit').text\n forecast['low_temp'] = day.find('low').find('fahrenheit').text\n forecast['conditions'] = day.find('conditions').text\n forecast['icon'] = day.find('icon').text\n forecast['skyicon'] = day.find('skyicon').text\n try:\n f, created = ForecastDay.objects.get_or_create(**forecast)\n if created:\n f.save()\n except:\n logging.info(\"Long Range Forecast Data missing or already created\")\n \n \n #Find Moon\n moon = root.find('moon_phase')\n illuminated = moon.find('percentIlluminated')\n age = moon.find('ageOfMoon')\n sun_rise = datetime.datetime(date.year, date.month, date.day, **_hour_minute(moon.find('sunrise')))\n sun_set = datetime.datetime(date.year, date.month, date.day, **_hour_minute(moon.find('sunset'))) \n #It doesn't error, so it appears to be doing what it should.\n f = ForecastDay.objects.get(forecast_date=date)\n f.sun_rise = sun_rise\n f.sun_set = sun_set\n f.moon_illuminated = illuminated.text\n f.moon_age = age.text\n try:\n f.save()\n except:\n logging.info(\"Moon Data missing or no new data available\")", "def get_weather(days, hours, db):\n days = format_list_for_db(days)\n hours = format_list_for_db(hours)\n sql = f\"SELECT * FROM weather WHERE day in {days} AND HOUR in {hours}\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n cursor.close()\n\n weathers = []\n if len(data) > 0:\n for weather in data:\n weather = {\"hour\": weather[1],\n \"day\": day_absolute_to_relative(weather[2]),\n \"temperature\": weather[3],\n \"apparenttemperature\": weather[4],\n \"precipitationintensity\": weather[5],\n \"precipitationprobability\": weather[6],\n \"humidity\": weather[7],\n \"dewpoint\": weather[8],\n \"windspeed\": weather[9],\n \"windbearing\": weather[10],\n \"windgust\": weather[11],\n \"pressure\": weather[12],\n \"cloudcover\": weather[13],\n \"uvindex\": weather[14],\n \"visibility\": weather[15]}\n weathers.append(weather)\n return weathers", "def create_table(self):\n c = self.conn.cursor()\n c.execute(\"CREATE TABLE sensor_data (mac text, name text, temperature real, light integer, moisture real, conductivity real, battery real, ts_utc int, date_iso text, firmware text )\")", "def _create_db(self):\n self.db = easydms.dbcore.Database(\":memory:\")\n self.db.create_db()", "def createDatabase(self):\n \n try:\n self.conn = MySQLdb.connect 
(host = settings.DATABASES['default']['HOST'], user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'])\n except MySQLdb.Error, e:\n raise Exception(\"Cannot connect to the MySQL server.\")\n\n try:\n cursor = self.conn.cursor()\n cursor.execute(\"CREATE DATABASE {};\".format(settings.DBNAME))\n except MySQLdb.Error, e:\n raise Exception(\"Cannot create the database {}. {}\".format(settings.DBNAME, e))\n finally:\n self.conn.close()\n\n try:\n self.conn = MySQLdb.connect (host = settings.DATABASES['default']['HOST'], user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = settings.DBNAME)\n except MySQLdb.Error, e:\n raise Exception(\"Cannot connect to the database {}. {}\".format(settings.DBNAME, e))\n\n try:\n cursor = self.conn.cursor()\n # creating the datasets table\n cursor.execute(\"CREATE TABLE datasets (dataset VARCHAR(255) PRIMARY KEY, datasetid INT UNIQUE AUTO_INCREMENT, ximagesz INT, yimagesz INT, zimagesz INT, xoffset INT, yoffset INT, zoffset INT, xvoxelres DOUBLE, yvoxelres DOUBLE, zvoxelres DOUBLE, scalingoption INT, scalinglevels INT, starttime INT, endtime INT);\")\n cursor.execute(\"CREATE TABLE channels (channel_name VARCHAR(255) ,dataset VARCHAR(255) REFERENCES datasets(dataset), PRIMARY KEY (channel_name,dataset), channel_type VARCHAR(255), channel_datatype VARCHAR(255), startwindow INT, endwindow INT);\")\n self.conn.commit()\n except MySQLdb.Error, e:\n raise Exception(\"Cannot create the tables for the database {}, {}\".format(settings.DBNAME, e))\n finally:\n self.conn.close()\n \n def pipInstall(self):\n \"\"\"Installing all the pip packages\"\"\"\n\n print \"Does Nothing\"\n\n def restart(self):\n \"\"\"Restart all the services\"\"\"", "def populate_db(self):\n # Get donors\n log.info(\"Populating donors.\")\n\n self.r.hmset('Thomas', {'donations': '500', 'email': '[email protected]', 'city': 'Athens', 'state': 'GA', 'zip': 30606})\n\n self.r.hmset('Ted', {'donations': '1', 'email': '[email protected]', 'city': 'Memphis', 'state': 'TN', 'zip': 38104})\n\n self.r.hmset(\"Bailey\", {'donations': '1000', 'email': '[email protected]', 'city': 'Washington', 'state': 'DC', 'zip': 12345})", "def main():\n\n conn = psycopg2.connect(**env.DATABASE)\n cursor = conn.cursor()\n\n for file, city in env.supported_cities().items():\n try:\n data = add_metadata(parse_html(city, get_html(city)))\n save_data_to_db(cursor, data, file.title())\n except Exception as e:\n print(\"Failed to scrape '%s': %s\" %(city, e))\n print(traceback.format_exc())\n\n conn.commit()\n conn.close()", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def _create_sql(self):\n\n pdbfile = self.pdbfile\n sqlfile = self.sqlfile\n\n if self.verbose:\n print('-- Create SQLite3 database')\n\n #name of the table\n #table = 'ATOM'\n\n # column names and types\n self.col = {'serial' : 'INT',\n 'name' : 'TEXT',\n 'altLoc' : 'TEXT',\n 'resName' : 'TEXT',\n 'chainID' : 'TEXT',\n 'resSeq' : 'INT',\n 'iCode' : 'TEXT',\n 'x' : 'REAL',\n 'y' : 'REAL',\n 'z' : 'REAL',\n 'occ' : 'REAL',\n 'temp' : 'REAL'}\n\n # delimtier of the column format\n # taken from http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM\n self.delimiter = {\n 'serial' : [6,11],\n 'name' : [12,16],\n 'altLoc' : [16,17],\n 'resName' :[17,20],\n 'chainID' :[21,22],\n 'resSeq' :[22,26],\n 'iCode' :[26,26],\n 'x' :[30,38],\n 'y' :[38,46],\n 'z' :[46,54],\n 'occ' :[54,60],\n 
'temp' :[60,66]}\n\n if self.no_extra:\n del self.col['occ']\n del self.col['temp']\n\n # size of the things\n ncol = len(self.col)\n ndel = len(self.delimiter)\n\n\n # open the data base\n # if we do not specify a db name\n # the db is only in RAM\n # there might be little advantage to use memory\n # https://stackoverflow.com/questions/764710/sqlite-performance-benchmark-why-is-memory-so-slow-only-1-5x-as-fast-as-d\n if self.sqlfile is None:\n self.conn = sqlite3.connect(':memory:')\n \n # or we create a new db file\n else:\n if os.path.isfile(sqlfile):\n sp.call('rm %s' %sqlfile,shell=True)\n self.conn = sqlite3.connect(sqlfile)\n self.c = self.conn.cursor()\n\n # intialize the header/placeholder\n header,qm = '',''\n for ic,(colname,coltype) in enumerate(self.col.items()):\n header += '{cn} {ct}'.format(cn=colname,ct=coltype)\n qm += '?'\n if ic < ncol-1:\n header += ', '\n qm += ','\n\n # create the table\n query = 'CREATE TABLE ATOM ({hd})'.format(hd=header)\n self.c.execute(query)\n\n\n # read the pdb file\n # this is dangerous if there are ATOM written in the comment part\n # which happends often\n #data = sp.check_output(\"awk '/ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a safer version consist at matching against the first field\n # won't work on windows\n #data = sp.check_output(\"awk '$1 ~ /^ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a pure python way\n # RMK we go through the data twice here. Once to read the ATOM line and once to parse the data ...\n # we could do better than that. But the most time consuming step seems to be the CREATE TABLE query\n # if we path a file we read it\n if isinstance(pdbfile,str):\n if os.path.isfile(pdbfile):\n with open(pdbfile,'r') as fi:\n data = [line.split('\\n')[0] for line in fi if line.startswith('ATOM')]\n else:\n raise FileNotFoundError('File %s was not found',pdbfile)\n\n # if we pass a list as for h5py read/write\n # we directly use that\n elif isinstance(pdbfile,np.ndarray):\n data = [l.decode('utf-8') for l in pdbfile.tolist()]\n\n # if we cant read it\n else:\n print(pdbfile)\n raise ValueError('PDB data not recognized')\n\n # if there is no ATOM in the file\n if len(data)==1 and data[0]=='':\n print(\"-- Error : No ATOM in the pdb file.\")\n self.is_valid = False\n return\n\n # haddock chain ID fix\n del_copy = self.delimiter.copy()\n if data[0][del_copy['chainID'][0]] == ' ':\n del_copy['chainID'] = [72,73]\n\n # get all the data\n data_atom = []\n for iatom,atom in enumerate(data):\n\n # sometimes we still have an empty line somewhere\n if len(atom) == 0:\n continue\n\n # browse all attribute of each atom\n at = ()\n for ik,(colname,coltype) in enumerate(self.col.items()):\n\n # get the piece of data\n data = atom[del_copy[colname][0]:del_copy[colname][1]].strip()\n\n # convert it if necessary\n if coltype == 'INT':\n data = int(data)\n elif coltype == 'REAL':\n data = float(data)\n\n # append keep the comma !!\n # we need proper tuple\n at +=(data,)\n\n # append\n data_atom.append(at)\n\n\n # push in the database\n self.c.executemany('INSERT INTO ATOM VALUES ({qm})'.format(qm=qm),data_atom)", "def create_database(self):\n\n try: \n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n\n print(self.filepath_render_database)\n\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS render_information(\n \n object_type VARCHAR(255),\n name VARCHAR(255),\n radius REAL,\n polar_angle_min REAL,\n polar_anglel_max REAL,\n polar_angle_segments 
REAL,\n polar_angle_random_rad REAL,\n azimuth_angle_min REAL,\n azimuth_angle_max REAL,\n azimuth_angle_segments REAL,\n azimuth_angle_random_rad REAL,\n tracking_obj VARCHAR(255),\n segmentation VARCHAR(255)\n\n\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n connection.close()\n print(\"Creating render database file\")\n except:\n print(\"Was not able to create render database file\")\n \n try: \n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS object_information(\n obj_filepath VARCHAR(255),\n obj_name VARCHAR(255),\n obj_type VARCHAR(255),\n obj_scale_factor REAL,\n obj_location_x REAL,\n obj_location_y REAL,\n obj_location_z REAL,\n obj_rotation_x REAL,\n obj_rotation_y REAL,\n obj_rotation_z REAL,\n obj_amount_percent REAL,\n obj_material_path VARCHAR(255),\n obj_point_in_time VARCHAR(255),\n maximum_random_rotation_degree_z REAL,\n maximum_random_translation REAL,\n random_amount REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n connection.close()\n print(\"Creating object database file\")\n except:\n print(\"Was not able to create object database file\")\n\n try: \n connection = sqlite3.connect(self.filepath_output_database)\n print(\"outputfilepath is:\", self.filepath_output_database)\n pointer = connection.cursor()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS objects(\n image_id REAL,\n object_name VARCHAR(255),\n object_type VARCHAR(255),\n object_amount REAL,\n object_location_x REAL,\n object_location_y REAL,\n object_location_z REAL,\n object_rotation_x REAL,\n object_rotation_y REAL,\n object_rotation_z REAL,\n object_dimensions_x REAL,\n object_dimensions_y REAL,\n object_dimensions_z REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS camera_settings(\n image_id REAL,\n image_variation REAL,\n camera_name VARCHAR(255),\n camera_location_x REAL,\n camera_location_y REAL,\n camera_location_z REAL,\n camera_rotation_x REAL,\n camera_rotation_y REAL,\n camera_rotation_z REAL,\n camera_focal_length REAL,\n camera_polar_angle REAL,\n camera_azimuth_angle REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS light_settings(\n image_id REAL,\n image_variation REAL,\n light_name VARCHAR(255),\n light_location_x REAL,\n light_location_y REAL,\n light_location_z REAL,\n light_rotation_x REAL,\n light_rotation_y REAL,\n light_rotation_z REAL,\n light_intensity REAL,\n light_polar_angle REAL,\n light_azimuth_angle REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS general_settings(\n image_id REAL,\n render_type VARCHAR(255),\n render_frame REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS bounding_boxes(\n image_id REAL,\n image_variation REAL,\n object_name VARCHAR(255),\n object_type VARCHAR(255),\n min_x REAL,\n max_x REAL,\n min_y REAL,\n max_y REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n connection.close()\n print(\"Creating output database file\")\n except:\n print(\"Was not able to create output database file\")", "def initialize_database():\n db = Database(database_name)\n i, m, u, p = db.fetch_needed_data()\n\n return i, m, u, p", "def 
init_db():\n\tdb.drop_all()\n\tdb.create_all()\n\n\tprint(\"Initialized Database.\")\n\treturn", "def get_weather_data(weather_station):\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=7)\n\n query_date_start = (\"%d%02d%02d\" % (then.year, then.month, then.day))\n query_date_end = (\"%d%02d%02d\" % (now.year, now.month, now.day))\n\n api_key = '/api/%s' % WUNDERGROUND_KEY\n history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)\n query = '/q/%s.json?showObs=0&ttl=120' % weather_station\n\n weather_url = (\"%s%s%s%s\" % (WUNDERGROUND_HOST, api_key, history_key, query))\n\n logger.info('Weather URL: %s', weather_url)\n response = requests.get(weather_url).text\n\n max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']\n sum_precip = json.loads(response)['history']['summary']['precip_sum']\n\n return max_temp_avg, sum_precip", "def __create_table(self):\n\n self.connection = self.db.connect()\n self.metadata = MetaData(self.connection)\n\n self.system = Table(self.table_name, self.metadata,\n Column('timestamp', DateTime(), primary_key=True, nullable=False),\n Column('vibration_sensor', Float()),\n Column('flow', Float()),\n Column('pressure', Float()),\n Column('power_consumption', Float()),\n Column('failure_times', Float()),\n Column('operational', Boolean())\n )\n\n self.metadata.create_all()", "def create_db(name=_db_indicators,\n indi_file=os.path.join('Source', 'codes_need.csv'),\n country_file=os.path.join('Source', 'work_countries.txt')):\n\n def create_indi_country(pdfI, con, mess, db_name, freq):\n if pdfI.shape[0]==0:\n return\n print('+' * 50, '{} WORKS'.format(mess), '+' * 50)\n\n pdfI.to_sql(cmm.strINDI_db_name, con, if_exists='replace')\n print('CREATE IMF.INDICATORS table for {} indicators'.format(pdfI.shape[0]))\n pdfC = get_countryes(db_name=db_name, country_txt_file=country_file)\n pdfC.to_sql(cmm.strCOUNTRY_db_name, con=con, if_exists='replace')\n print('CREATE IMF.COUNTRIES for {0} countries.'.format(pdfC.shape[0]))\n\n update_db(db_name=db_name, start=1970, end=2000)\n update_db(db_name=db_name, start=1999)\n\n cmm.create_views(db_name, freq=freq)\n\n pdf = cmm.read_indicators_from_csv(indi_file)\n print(indi_file)\n\n pdfQ = pdf[pdf['Freq']=='Q']\n pdfA = pdf[pdf['Freq'] == 'Y']\n pdfM = pdf[pdf['Freq'] == 'M']\n\n #pdfC = cmm.read_countries(file_name=country_file)\n\n nameA=cmm.db_name2annu(name)\n nameM = cmm.db_name2annu(name, suff='_M')\n\n coni = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=name))\n coniA = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=nameA))\n coniM = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=nameM))\n\n create_indi_country(pdfQ, coni, 'QUARTERLY', name, freq='Q')\n create_indi_country(pdfA, coniA, 'ANNUAL', nameA, freq='A')\n create_indi_country(pdfM, coniM, 'MONTHLY', nameM, freq='M')", "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()" ]
[ "0.753115", "0.6986817", "0.6771315", "0.67573315", "0.6443865", "0.63336414", "0.63230395", "0.6307932", "0.6303793", "0.62672585", "0.6251016", "0.6219543", "0.62010366", "0.61712205", "0.6160077", "0.6145157", "0.60383976", "0.6035518", "0.6035307", "0.60166687", "0.6011914", "0.5976979", "0.5968856", "0.59316415", "0.59218", "0.5889934", "0.5888027", "0.5863287", "0.585774", "0.58497816", "0.5834029", "0.58237034", "0.58095956", "0.5793585", "0.5788244", "0.5784668", "0.57826006", "0.57734704", "0.5720794", "0.5690857", "0.5688561", "0.5683352", "0.5666827", "0.56634235", "0.56456286", "0.5634368", "0.5624422", "0.56020105", "0.5584837", "0.5582477", "0.5580616", "0.55791277", "0.5564623", "0.5553549", "0.55505073", "0.5544021", "0.5542836", "0.5519509", "0.5510465", "0.5508235", "0.5495786", "0.54933", "0.5492006", "0.5484443", "0.54807585", "0.5476533", "0.54735786", "0.54728097", "0.54597944", "0.5452989", "0.54525787", "0.5450085", "0.5448643", "0.54485667", "0.5445965", "0.54431033", "0.543697", "0.54342717", "0.542369", "0.54220575", "0.54198116", "0.5419669", "0.5416143", "0.54083467", "0.5405662", "0.5402131", "0.5395727", "0.539472", "0.5394006", "0.5393698", "0.53933764", "0.5391933", "0.538455", "0.5382807", "0.5379051", "0.53781664", "0.5378032", "0.53771746", "0.53763866", "0.5375355" ]
0.67318547
4
Provide a string of addresses and this class will extract the "street" and "house number".
def __init__(self, string): self.string = string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_address(self, soup):\n street, city, state, zipcode = None, None, None, None\n try:\n # property detail tag\n street = soup.find('div', class_='main-address').get_text().strip()\n # find address tag\n address = soup.find('div', class_='c-address')\n \n # pattern for the address in this website\n locality = address.find_all('span', class_='locality')\n city = locality[0].get_text().strip()\n if len(locality) > 1:\n city = locality[1].get_text().strip()\n state = address.find('span', class_='region').get_text().strip()\n zipcode = address.find('span', class_='postal-code').get_text().strip()\n return street, city, state, zipcode\n except:\n return street, city, state, zipcode", "def _get_address(self, soup):\n\n try:\n # from the content tag, extract the tag that contains all the address info\n address_tag = soup.find('div', class_='flex flex-col md:flex-row')\n # street tag\n street_tag = address_tag.find('h1', class_='h3')\n # street information\n street = street_tag.get_text()\\\n .strip()\\\n .replace(',', '')\n # region tag \n region_tag = address_tag.find('h5', class_='listing-card-location') \\\n .get_text() \\\n .strip() \\\n .split(' ')\n # city information\n city = region_tag[0].replace(',', '').title()\n # state information\n state = region_tag[1]\n # zipcode information\n zipcode = region_tag[2]\n\n return street, city, state, zipcode\n \n except:\n # return None if any of the above parts failed\n # if there's any part that's missing in the address part,\n # the whole address becomes useless\n return None, None, None, None", "def parse_address_street(address_str, address_zip_us_re, address_zip_us_lax_re):\n address = {}\n errors = []\n parts = address_str.split('$')\n if DEBUG:\n address['debug_address_str'] = address_str\n address['debug_part_1'] = parts[0]\n address['debug_part_last'] = parts[-1]\n address['debug_length'] = len(parts)\n #if len(parts) == 1:\n #print('cannot split: {}: {}'.format(debug_type, address_str))\n match = re.search(address_zip_us_re, parts[-1])\n if match:\n if DEBUG:\n address['debug_parser'] = 'A'\n address['city'] = match.group(1)\n address['region'] = match.group(2).upper()\n address['postalCode'] = match.group(3)\n address['countryId'] = 'US'\n if len(parts) == 2:\n if DEBUG:\n address['debug_parser'] = 'B'\n address['addressLine1'] = parts[0]\n else:\n if len(parts) == 3:\n if DEBUG:\n address['debug_parser'] = 'C'\n address['addressLine1'] = parts[0]\n if parts[0] != parts[1]:\n if DEBUG:\n address['debug_parser'] = 'D'\n address['addressLine2'] = parts[1]\n else:\n match2 = re.search(address_zip_us_lax_re, address_str)\n if match2:\n if DEBUG:\n address['debug_parser'] = 'E'\n address['region'] = match2.group(2).upper()\n address['postalCode'] = match2.group(3)\n address['countryId'] = 'US'\n # FIXME: Cannot reliably parse the remainder for city and street address\n errors.append('Partial parse street address: {}'.format(address_str))\n address['addressLine1'] = match2.group(1)\n else:\n # This is the remainder that we could not parse.\n # So just put it all into \"addressLine1\" to be manually adjusted later.\n if DEBUG:\n address['debug_parser'] = 'F'\n errors.append('Cannot parse street address: {}'.format(address_str))\n address['addressLine1'] = address_str\n return (address, errors)", "def get_address(self, list_item):\n Address = namedtuple('Address', ['addr', 'city', 'state', 'zip'])\n extract = [text for text in list_item.find('address').stripped_strings]\n\n # Sometimes a street address is not given\n if len(extract) == 
1:\n addr, rest = None, extract[0]\n else:\n addr, rest = extract\n\n city, rest = rest.split(',')\n state, zip = rest.strip().split(' ')\n return Address(addr, city, state, zip)", "def _get_address(self, address_tag, hdr):\n\n # try to find all the span tags in the address tag, the span tags\n # include all the address information we need \n try:\n elements = address_tag.find_all('span')\n\n # scrape the text out of the span tags and remove\n # all the whitespaces and punctuation marks\n address = elements[0].get_text()\\\n .replace(',','')\\\n .strip()\n city = elements[1].get_text().strip()\n state = elements[2].get_text().strip()\n zipcode = elements[3].get_text().strip()\n return address, city, state, zipcode\n # however, sometimes the address tag does not include the street\n # info, in this case, use the text in the header tag, which serves\n # as a replacement for the address \n except:\n address = hdr.get_text()\n elements = address_tag.find_all('span')\n city = elements[0].get_text()\\\n .replace(',','')\\\n .strip()\n state = elements[1].get_text().strip()\n zipcode = elements[2].get_text().strip()\n return address, city, state, zipcode", "def get_str_address(address):\n return \\\n get_ob_value_primitive(address, 'AddrLine1', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine2', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine3', exception_return_value='') + ', ' + \\\n get_ob_value_primitive(address, 'City', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'County', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'StateProvince', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'ZipPostalCode', exception_return_value='')", "def street_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"street_addresses\")", "def street_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"street_addresses\")", "def street_addresses(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"street_addresses\")", "def __getAddresses(parsed: BeautifulSoup) -> list:\n\n # Addresses container\n address_divs = parsed.find_all('div', class_='mailer')\n\n # Building RegEx for phone number\n # The following RegEx extracts phone numbers in the following formats:\n # 1. (###) ###-####\n # 2. ###-###-####\n # 3. 
##########\n phone_number_regex = re.compile(\n r'(\\(\\d{3}\\) \\d{3}-\\d{4}|\\d{3}-\\d{3}-\\d{4}|\\d{10})')\n\n # List for final addresses\n addresses = list()\n\n for address in address_divs:\n # Create dict for address\n address_parsed = dict()\n # Split text by newline\n address_items = address.text.split('\\n')\n # Removing leading and trailing spaces\n address_items = [i.strip() for i in address_items]\n\n # Variable to store street address\n street_address = ''\n\n # Iterate through each line\n for idx, address_item in enumerate(address_items):\n # First line is address type\n if idx == 0:\n address_parsed['type'] = address_item\n continue\n\n # Check if line has phone number\n phone_matches = phone_number_regex.findall(address_item)\n if len(phone_matches) == 1:\n # Stripping non-digit characters from phone number\n phone_number = re.sub('[^0-9]', '', phone_matches[0])\n address_parsed['phone'] = phone_number\n continue\n \n # If no number, add to address line\n street_address += address_item.strip() + ' '\n \n # Adding street address to parsed address\n address_parsed['street_address'] = street_address.strip()\n\n # Adding parsed address to addresses master list\n addresses += [address_parsed]\n\n return addresses", "def get_apartment_address(self, soup, apartment_dict):\n\n info_class = soup.find_all('div', {'class': 'info'})\n if info_class and len(info_class) > 0:\n info_class = info_class[0]\n address = info_class.find('h2').text.strip()\n\n from parse import parse\n address = parse(\"Location: {}\", address)[0]\n apartment_dict['address'] = address\n else:\n logging.warning(\"Failed to parse apartment address\")\n return", "def get_address(self):\n return self.address.line[0]+\", \"+self.address.city+\", \"+self.address.state+\", \"+self.address.country", "def addresses(self):\n if 'Ward Matters' in self.topics or 'City Matters' in self.topics:\n stname_pattern = \"(\\S*[a-z]\\S*\\s){1,4}?\"\n sttype_pattern = \"(ave|blvd|cres|ct|dr|hwy|ln|pkwy|pl|plz|rd|row|sq|st|ter|way)\"\n st_pattern = stname_pattern + sttype_pattern\n\n addr_pattern = \"(\\d(\\d|-)*\\s%s)\" %st_pattern\n intersec_pattern = exp = \"((?<=\\sat\\s)%s\\s?and\\s?%s)\" %(st_pattern, st_pattern)\n\n pattern = \"(%s|%s)\" %(addr_pattern, intersec_pattern)\n\n matches = re.findall(pattern, self.description, re.IGNORECASE)\n\n addresses = [m[0] for m in matches]\n return addresses\n\n return []", "def _get_address(self, jdict):\n \n try:\n # access the location info dictionary\n loc_dict = jdict['props']['homeDetails']['location']\n state = loc_dict['stateCode']\n city = loc_dict['city']\n zipcode = loc_dict['zipCode']\n street = loc_dict['formattedLocation']\n return street, city, state, zipcode\n except:\n return None, None, None, None", "def street_address(self):\n\t\tif self.address2:\n\t\t\treturn '{}, {}'.format(self.address, self.address2)\n\t\treturn self.address", "def street_address1(self) -> str:\n return pulumi.get(self, \"street_address1\")", "def address_regex(self) -> Any:", "def get_address(address: str) -> Tuple[str, str, str]:\n\n # Try to geocode the address as given\n g = geocoder.osm(address)\n\n if g.json is not None:\n\n # TODO this is inefficient and hacky\n\n # First thing we attempt if the result isn't complete is just to\n # add the housenumber (often the issue).\n if not good_geocoder_result(g.json):\n g.json['housenumber'] = usaddress.tag(address)[0]['AddressNumber']\n\n # If the result is now good, return it\n if good_geocoder_result(g.json):\n\n # Geocoding was successful. 
Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n\n # Geocoding was unsuccessful.\n # Let's try to create a cleaner address by first parsing out the pieces we need, then try again.\n \n # Parsing the address components...\n parsed, addr_type = usaddress.tag(address)\n if addr_type != \"Street Address\":\n raise ValueError(f\"Address could not be properly parsed. Resulting type: {addr_type}. Result: \\n{parsed}\")\n \n # Trim off any whitespace from the parsed components.\n for part in parsed:\n parsed[part] = parsed[part].strip()\n\n reqd_address_parts = ['AddressNumber', 'StreetName', 'PlaceName']\n if any(address_part not in parsed for address_part in reqd_address_parts):\n raise ValueError(f\"The address must have at least a house number, street, and city.\")\n \n # Initialize the resulting address string with the address number (aka house/street number)\n new_address = parsed['AddressNumber']\n \n # If the streetname is just a number, make it ordinal\n if parsed['StreetName'].isnumeric():\n parsed['StreetName'] = ordinal(parsed['StreetName'])\n \n # Get the whole street name\n for k, v in [(k, v) for k, v in parsed.items() if k.startswith(\"StreetName\")]:\n new_address += f\" {v}\"\n \n # Add the city...\n new_address += f\", {parsed['PlaceName']}\"\n # Add the state, if it exists\n if 'StateName' in parsed:\n new_address += f\", {parsed['StateName']}\"\n # And the zip code, if it exists\n if 'ZipCode' in parsed:\n new_address += f\" {parsed['ZipCode']}\"\n \n # Now try to geocode this improved address\n g = geocoder.osm(new_address)\n\n if g.json is not None:\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n \n # Still can't geocode the address. Throw an error\n else:\n raise ValueError(f\"Could not geocode this address: {address}\")", "def toAddr(self, addressString: unicode) -> ghidra.program.model.address.Address:\n ...", "def getAddress(user):", "def street_address():\r\n\r\n return _random.choice(\r\n [\r\n '%d-%d %s' % (\r\n _random.randrange(999),\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%d %s' % (\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%s %d, %s' % (\r\n 'P.O. 
Box',\r\n _random.randrange(999),\r\n street_name()\r\n )\r\n ]\r\n )", "def address(self):\n return str(self.street) + str(self.city) + str(self.state) + str(self.zipcode)", "def parse_address_campus(address_str, address_campus_re, address_campus_room_re):\n address = {}\n errors = []\n if '$' not in address_str:\n match = re.search(address_campus_room_re, address_str)\n if match:\n address['addressLine1'] = match.group(1)\n else:\n # This leftover is either an erroneous email address or a building name\n if '@' in address_str:\n errors.append('Campus address seems to be email: {}'.format(address_str))\n #FIXME: Should this be saved to addressLine1 anyway.\n else:\n # It seems to be a building address\n address['addressLine2'] = address_str\n else:\n match = re.search(address_campus_re, address_str)\n if match:\n address['addressLine2'] = match.group(1)\n address['addressLine1'] = match.group(2)\n #else:\n # FIXME: here just for debug\n #errors.append('Cannot parse campus address: {}'.format(address_str))\n return (address, errors)", "def find_street(self, text):\n text = text.replace('\"', '')\n result = ''\n textarr = text.split()\n for i in range(0, len(textarr)):\n if textarr[i] in self.streets:\n # decide if the result is nearly empty or not to add the word with or without a space\n if len(result) < 2:\n result = result + textarr[i]\n else:\n result = result + ' ' + textarr[i]\n\n if result in self.streets:\n\n # ends the function if the street is found\n if (result + '\\n') in self.streets and result != 'straße' and result != 'in' and result != '' \\\n and result != 'ein' and result != 'der' and result != 'und' and result != ' ' \\\n and result != 'an':\n return result\n else:\n result_list = result.split()\n del result_list[0]\n result = ' '.join(w for w in result_list)\n result = result.lstrip(' ')\n # ends the function if the street is found\n if (result + '\\n') in self.streets and result != 'straße' and result != 'in' and result != '' \\\n and result != 'ein' and result != 'der' and result != 'und' and result != ' ' \\\n and result != 'ist' and result != 'an':\n return result\n elif i == len(textarr)-1:\n result_list = result.split()\n del result_list[0]\n result = ' '.join(w for w in result_list).lstrip(' ')\n if (result + '\\n') in self.streets and result != 'straße' and result != 'in' and result != '' \\\n and result != 'ein' and result != 'der' and result != 'und' and result != ' ' \\\n and result != 'ist' and result != 'an':\n return result\n print(result)\n return None", "def _compute_adress(self):\r\n\t\tfor leads in self:\r\n\t\t\tleads.address = leads.street + \" \" + leads.street2", "def address_str(self):\n return self._plrevgeoloc.addressString", "def extract_addresses(addresses):\n \n # Since lists are iterated over in Python in an orderly fashion, \n # put 'Input Reg' before 'Input' such that the 'Reg' in an address \n # with an 'Input Reg' prefix doesn't get left behind\n address_prefixes= [\"Input Reg \", \"Holding Reg \", \"Input \", \"Coil \"]\n\n \n for idx,address in enumerate(addresses):\n # Replace prefixes with empty string\n for prefix in address_prefixes:\n addresses[idx]=addresses[idx].replace(prefix, \"\")\n # Extract numeral\n try:\n addresses[idx]= int(addresses[idx])\n except:\n logging.warning(\"Invalid modbus address suppied at index {}\".format(idx))\n\n # Return\n return addresses", "def addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"addresses\")", "def parse_address(address: str) -> OrderedDict[str, 
str]:\n\n address = address.replace(\"\\n\", \", \")\n\n parsed_address, address_type = usaddress.tag(address)\n if address_type != \"Street Address\":\n logger.warning(\n f\"Couldn't parse address '{address}' of type {address_type}; best guess: {parsed_address}\"\n )\n\n # Fixup: At least one address has \"WI, USA\" in the \"StateName\" component.\n # Strip non-state components\n if parsed_address.get(\"StateName\"):\n parsed_address[\"StateName\"] = parsed_address[\"StateName\"].partition(\",\")[0]\n\n return parsed_address", "def integrated_address_regex(self) -> Any:", "def search_address(query: str) -> Tuple[int, str]:\n\n url = 'https://api.n1.ru/api/v1/geo/geocoder/with_cities/'\n params = _search_params.copy()\n params['q'] = query\n\n try:\n r = requests.get(url, params=params, headers=_headers)\n response = r.json()\n\n if not 'result' in response or not response['result']:\n raise NotFoundException('Result not found or empty.')\n \n address = None\n house_number = query.split(',')[-1].strip()\n for x in response['result']:\n if x['name_ru'].lower() == house_number:\n address = x\n break\n \n if address is None:\n raise NotFoundException(f'Not found house number {house_number} in result: {response[\"result\"]}')\n \n return address['street']['id'], address['name_ru']\n except requests.RequestException as e:\n raise ParserException(f'Fail make request. query: {query}') from e\n except NotFoundException as e:\n raise ParserException('Invalid result.') from e\n except (KeyError, IndexError) as e:\n raise ParserException(f'Fail get street id or house number. value: {response[\"result\"]}') from e", "def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber", "def get_address(self):\n\n return \"{}\\n{}\\n{},\\n{},\\n{}\".format(\n self.address_line_1, self.city, self.state, self.postal_code, self.country\n )", "def 
listToAddr(location):\n\n start_time = time.time()\n wk = [key for key in location.keys() if key in ('street', 'house_num', 'suburb', 'city', 'province', 'country', 'pos_code')]\n address = re.sub(',', '', ', '.join(value for value in dict(zip(wk, [location[k] for k in wk])).values() if value), 1)\n print('--- Tiempo de ejecucion listToAddr: {} segundos ---'.format((time.time() - start_time)))\n return address", "def address():\n # We start with generating the street name. For this we choose\n # between the most common prefixes and our own prefixes\n prefix = dice.randint(1, 100)\n if prefix <= 10: # 10%\n prefix = \"Haupt\"\n elif prefix <= 18: # 8%\n prefix = \"Schul\"\n elif prefix <= 25: # 7%\n prefix = \"Garten\"\n elif prefix <= 32: # 7%\n prefix = \"Dorf\"\n elif prefix <= 39: # 7%\n prefix = \"Bahnhof\"\n elif prefix <= 46: # 7%\n prefix = \"Wiesen\"\n elif prefix <= 52: # 6%\n prefix = \"Berg\"\n elif prefix <= 56: # 4%\n prefix = \"Kirch\"\n elif prefix <= 60: # 4%\n prefix = \"Wald\"\n elif prefix <= 64: # 4%\n prefix = \"Ring\"\n else:\n prefix = dice.choice(names.prefix)\n\n # Now we can add the suffix\n suffix = dice.randint(1, 100)\n if suffix <= 78:\n suffix = \"straße\"\n elif suffix <= 96:\n suffix = \"weg\"\n elif suffix <= 98:\n suffix = \"allee\"\n elif suffix == 99:\n suffix = \"ring\"\n elif suffix == 100:\n suffix = \"platz\"\n\n # When we have a city name as prefix, we need to capitalize the\n # suffix since it will be two words\n if prefix[-1] == \" \":\n suffix = suffix.capitalize()\n\n # Now we can add them together\n street = prefix + suffix\n\n # We need a house number as well. In Germany most numbers have\n # between one and four digits, so we will use this as base. Lower\n # numbers are more common, so we'll give it a 10% probability of\n # using 3 digits and 1% of using 4 digits\n digits = dice.randint(1, 100)\n if digits == 100:\n house_number = str(dice.randint(1000, 9999))\n elif digits >= 90:\n house_number = str(dice.randint(100, 999))\n else:\n house_number = str(dice.randint(1, 99))\n address_full = street + \" \" + house_number\n return address_full", "def street_address(full_address):\n full_address = normalise_address(full_address)\n capture_groups = ROAD_PATTERN.findall(full_address)\n if capture_groups:\n name, _type = process_capture_groups(capture_groups)\n return '{} {}'.format(name, _type)\n return ''", "def build_address(record):\n pass", "def get_postal_address(self):\n # print \"getting postal address\"\n address = []\n if self.name:\n address.append(self.name)\n if self.number:\n address.append(self.number + \" \" + self.street) # because building numbers and street names go on the same line\n elif self.street:\n address.append(self.street)\n if self.additional_street_address:\n address.append(self.additional_street_address)\n if self.site.post_town:\n address.append(self.site.post_town + \" \" + self.postcode)\n elif self.postcode:\n address.append(self.postcode)\n return address", "def street_address(self):\n if \"streetAddress\" in self._prop_dict:\n return self._prop_dict[\"streetAddress\"]\n else:\n return None", "def parse_address(self, address: str) -> Optional[Address]:\n raise NotImplemented", "def parse_building_address(addr_string):\n addr_string = re.sub(_regexp, '', addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_dir, addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_suffix, addr_string)\n addr_string = re.sub(_regexp_extra_space, ' ', addr_string)\n return addr_string.strip().upper()", 
"def find(self, text: unicode) -> ghidra.program.model.address.Address:\n ...", "def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue", "def process_address(text):\n return sanitize(text[9:])", "def get_address_string(self):\n output = ''\n if self.address_line_1:\n output += '{}'.format(self.address_line_1)\n if self.address_line_2:\n output += ', {}'.format(self.address_line_2)\n if self.city:\n output += ', {}'.format(self.city)\n if self.state:\n output += ', {}'.format(self.state)\n if self.zipcode:\n output += ' {}'.format(self.zipcode)\n return output", "def test_get_address(self):\r\n note_data = self.tape.dataframe.iloc[0]\r\n note = self.tape._get_note_object(note_data)\r\n eq_(note.get_address(), '8 Brown St, Methuen, MA 01844')", "def extract_city(full_address):\n full_address = full_address.strip()\n last_comma_index = full_address.rindex(\",\")\n mid_comma_index = full_address.rindex(\",\", 0, last_comma_index)\n city = full_address[mid_comma_index + 1 : last_comma_index]\n city = city.strip()\n return city", "def fetch_address(cpr: str) -> str:\n\n return \"Åbogade 15, 8200 Aarhus N\"", "def capture_address_element(regex_object, full_address):\n full_address = normalise_address(full_address)\n capture_groups = regex_object.search(full_address)\n if capture_groups:\n return capture_groups.group(0)\n return ''", "def __str__(self):\n return format_address(**self._get_elements())", "def street_address2(self) -> Optional[str]:\n return pulumi.get(self, \"street_address2\")", "def test_address_other_parameters():\n address = lob.Address.create(name='Siddharth Saha', address_line1='104, Printing Boulevard',\n address_line2='Sunset Town', email='[email protected]',\n address_city='Boston', address_state='MA', address_country='US',\n address_zip='12345')\n print address.to_dict()", "def address(self):\n return self.data.get('address')", "def addresses(self) -> \"List[str]\":\n return self._attrs.get(\"addresses\")", "def addresses(self) -> \"List[str]\":\n return self._attrs.get(\"addresses\")", "def addresses(self) -> \"List[str]\":\n return self._attrs.get(\"addresses\")", "def postcode(full_address):\n return capture_address_element(POSTCODE_PATTERN, full_address)", "def address1(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address1\")", "def address1(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address1\")", "def extract_zipcode(full_address):\n full_address = full_address.strip()\n last_space_index = full_address.rindex(\" \")\n zipcode = full_address[last_space_index + 1 : ]\n return zipcode", "def extract_state(full_address):\n full_address = full_address.strip()\n last_comma_index = full_address.rindex(\",\")\n last_space_index = full_address.rindex(\" \")\n state = full_address[last_comma_index + 1 : last_space_index]\n state = state.strip()\n return state", "def street_address3(self) -> Optional[str]:\n return pulumi.get(self, \"street_address3\")", "def get_address(post_content):\n post_address = post_content.find(\"div\", {\"class\": \"mapaddress\"})\n address_attr = {\"address\": \"\"}\n if post_address is not None:\n address_attr[\"address\"] = post_address.text\n return address_attr", "def 
filter_string(self):\n logger.info(\"Information Gathering Finished!\")\n\n pattern_street = re.compile(r'[A-Za-z]+\\s?\\w+(?=\\s[Nn]o\\s\\d+$) |'\n r' [A-Za-z]+\\s?\\w+\\s?[A-Za-z]+\\s?[A-Za-z]+',\n re.X) # street pattern\n match_street = pattern_street.search(self.string)\n\n # If there are no house numbers provided in the input file,\n # print(not found) in the output JSON file\n numbers_instring = re.findall(r'\\d+', self.string) # digit counts in given string\n\n if len(numbers_instring) > 0:\n # In most cases we have: \"no\" followed by some digits\n pattern_housenumber = re.compile(r'(\\d+\\s?[A-Za-z]?$) |'\n r' (^\\d+) |'\n r' [Nn]o+[\\s?]+[0-9]+$',\n re.X) # house number pattern\n match_housenumber = pattern_housenumber.search(self.string)\n fin_housenumber = match_housenumber[0]\n else:\n match_housenumber = [\"not found\"]\n\n fin_housenumber = match_housenumber[0]\n fin_street = match_street[0]\n print(\"street: \", fin_street)\n print(\"housenumber: \", fin_housenumber)\n return {'street': fin_street, 'housenumber': fin_housenumber}", "def address_details(self) -> 'outputs.AddressDetailsResponse':\n return pulumi.get(self, \"address_details\")", "def address_line_1(self):\n return \"{} {} {}\".format(\n self.fake.randomize_nb_elements(1000),\n self.fake.last_name(),\n self.fake.random_element(elements=STREET_SUFFIX)\n )", "def unpack_addresses(self, addresses_to_test):\n if len(addresses_to_test) == 0:\n raise ValueError(\n \"There were no arguments passed to the function. That is wrong. Closing\"\n )\n\n return_addresses = []\n for address in addresses_to_test:\n if \"/\" in address:\n try:\n six_or_four = ipaddress.ip_network(address)\n except ValueError:\n print(f\"{address} is not a valid subnet. Skipping.\")\n continue\n for address_host in six_or_four.hosts():\n return_addresses.append(str(address_host))\n else:\n try:\n ipaddress.ip_address(address)\n except ValueError:\n print(f\"{address} is not a valid address. Skipping.\")\n continue\n return_addresses.append(str(address))\n for address in return_addresses:\n try:\n ipaddress.ip_address(address)\n except ValueError:\n raise ValueError(f\"{address} is not an IPv4/v6 address. 
Shutting Down\")\n if len(return_addresses) > 0:\n return return_addresses\n else:\n raise ValueError(\"No usable addresses to scan\")", "def address(self) -> tuple[str, int]:", "def format_address(line1, line2, city, state, zipcode):\n\t\n\tstreetlines = line1\n\tcityline = city\n\t\n\tif len(streetlines) > 0 and len(line2) > 0:\n\t\tstreetlines += \"\\n\"\n\t\n\tif len(cityline) > 0 and len(state) > 0:\n\t\tcityline += \", \"\n\t\n\tstreetlines += line2\n\tcityline += state\n\t\n\treturn \"\\n\".join([streetlines, cityline, zipcode])", "def street(self):\n if \"street\" in self._prop_dict:\n return self._prop_dict[\"street\"]\n else:\n return None", "def _cleanupAddress(self, address):\n clean = []\n \n # This is sort of a desultory effort but I'm not convinced \n # that these cleanups will actually result in cleaner searches\n for word in address.split(None):\n lower = word.lower()\n \n # Some things we just nuke\n if lower == 'at': continue\n elif lower == 'btw': continue\n elif lower == 'btwn': continue\n elif word.isdigit(): continue\n \n # Or we make substitiutions\n elif lower == 'st' or lower == 'st.':\n word = 'Street'\n elif lower == 'ave' or lower == 'ave.':\n word = 'Avenue'\n elif lower == 'pl' or lower == 'pl.':\n word = 'Place'\n elif lower == 'n': word = 'North'\n elif lower == 'e': word = 'East'\n elif lower == 's': word = 'South'\n elif lower == 'w': word = 'West'\n \n clean.append(word)\n return ' '.join(clean)", "def get_address(self):\n \n if \"'\" in self.data.get(\"AddressInfo\").get(\"AddressLine1\") :\n self.data.get(\"AddressInfo\").get(\"AddressLine1\").replace(\"'\",\"\")\n\n return self.data.get(\"AddressInfo\").get(\"AddressLine1\")", "def getRestaurantAddresses(restaurants):\n addresslist = []\n for rest in restaurants:\n if 'address' in rest:\n addressstring = str(rest['address']) + ' ' + str(rest['city'])\n addresslist.append(addressstring)\n\n # pprint.pprint(addresslist)\n return addresslist", "def normalize_address(patched_address: OrderedDict[str, str]) -> location.Address:\n\n address_kwargs = {\n # \"street1\",\n # \"city\",\n # \"state\",\n # \"zip\"\n }\n street_buffer: List[str] = []\n suite_buffer: List[str] = []\n while len(patched_address) > 0:\n component, value = patched_address.popitem(last=False)\n if component == \"PlaceName\":\n address_kwargs[\"city\"] = value\n elif component == \"StateName\":\n address_kwargs[\"state\"] = value\n elif component == \"ZipCode\":\n address_kwargs[\"zip\"] = value\n elif component == \"OccupancyType\":\n suite_buffer.append(value)\n elif component == \"OccupancyIdentifier\":\n suite_buffer.append(value)\n else:\n street_buffer.append(value)\n address_kwargs[\"street1\"] = \" \".join(street_buffer)\n if len(suite_buffer) > 0:\n address_kwargs[\"street2\"] = \" \".join(suite_buffer)\n\n return location.Address(**address_kwargs)", "def test_address(residence):\n # We start by creating our result list\n result = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n # Then we split our given string so we only have the address left\n residence = residence.split(\", \")[1]\n\n # First we check for the prefix and increase the according value\n if \"Haupt\" in residence:\n result[0] = 1\n elif \"Schul\" in residence:\n result[1] = 1\n elif \"Garten\" in residence:\n result[2] = 1\n elif \"Dorf\" in residence:\n result[3] = 1\n elif \"Bahnhof\" in residence:\n result[4] = 1\n elif \"Wiesen\" in residence:\n result[5] = 1\n elif \"Berg\" in residence and residence[4] in (\"s\", \"w\", \"a\", \"r\", \"p\"):\n 
result[6] = 1\n elif \"Kirch\" in residence:\n result[7] = 1\n elif \"Wald\" in residence:\n result[8] = 1\n elif \"Ring\" == residence[0:4]:\n result[9] = 1\n else:\n result[10] = 1\n\n # Now we check the suffix\n if \"straße\" in residence or \"Straße\" in residence:\n result[11] = 1\n elif \"Weg\" in residence or \"weg\" in residence:\n result[12] = 1\n elif \"Allee\" in residence or \"allee\" in residence:\n result[13] = 1\n elif \"platz\" in residence or \"platz\" in residence:\n result[15] = 1\n else:\n result[14] = 1\n\n # And now we check the number\n number = int(residence.split()[-1])\n if number > 999:\n result[16] = 1\n elif number > 99:\n result[17] = 1\n else:\n result[18] = 1\n\n return result", "def get_address(self):\n entity = self\n if entity.abstract_entity:\n entity = self.get_real_ancestor()\n if entity:\n address = entity.get_institutional_address()\n building = entity.get_building()\n if building:\n if entity.building_recapitulates_entity_name: \n address.extend(building.get_postal_address()[1:])\n else:\n address.extend(building.get_postal_address())\n return address", "def test_address_other_parameters():\n address = lob.Address.create(name = 'Siddharth Saha', address_line1 = '104, Printing Boulevard',\n address_line2 = 'Sunset Town', email = '[email protected]', \n address_city = 'Boston', address_state = 'MA', address_country = 'US',\n address_zip = '12345')\n print address.to_dict()", "def clean_address(self, s):\n # The letter \"O\" instead of the numeral \"0\" is a common mistake.\n s = re.sub(\n r\"\\b[A-Z][O0-9][A-Z]\\s?[O0-9][A-Z][O0-9]\\b\", lambda x: x.group(0).replace(\"O\", \"0\"), clean_string(s)\n )\n for k, v in province_or_territory_abbreviations().items():\n # Replace a province/territory name with its abbreviation.\n s = re.sub(\n r\"[,\\n ]+\"\n r\"\\(?\" + k + r\"\\)?\"\n r\"(?=(?:[,\\n ]+Canada)?(?:[,\\n ]+[A-Z][0-9][A-Z]\\s?[0-9][A-Z][0-9])?\\Z)\",\n \" \" + v,\n s,\n )\n # Add spaces between province/territory abbreviation, FSA and LDU and remove \"Canada\".\n return re.sub(\n r\"[,\\n ]+\" r\"([A-Z]{2})\" r\"(?:[,\\n ]+Canada)?\" r\"[,\\n ]+([A-Z][0-9][A-Z])\\s?([0-9][A-Z][0-9])\" r\"\\Z\",\n r\" \\1 \\2 \\3\",\n s,\n )", "def __str__(self):\n if self._street_name != self.DEFAULT_STREET_NAME and \\\n self._house_num != self.DEFAULT_HOUSE_NUM and \\\n self._apt_num != self.DEFAULT_APT_NUM:\n address = f\"\\n{self._house_num} {self._street_name} Street, \" \\\n f\"#{self._apt_num}\"\n return address\n else:\n return \"<None>\"", "def get_address(self):\n if self.get_entity: # needs an entity to work\n if self.building:\n address = self.get_entity.get_institutional_address()\n address.extend(self.building.get_postal_address())\n return address\n else:\n return self.get_entity.get_address()", "def addresses( data ) :\n return list( set(chain.from_iterable( [ re.sub(r'\\[.*?\\]\\s+','',x['C1']).split('; ') for x in data ] )))", "def suggestions(self, input, borough_code=None):\n parsed = parser.address(input)\n if borough_code:\n parsed['BOROUGH_CODE'] = borough_code\n self.similiar_names = []\n self.results = []\n if parsed['PHN'] and parsed['STREET']:\n if not parsed['BOROUGH_CODE'] and not parsed['ZIP']:\n # iterate borocodes\n for x in range(1, 6):\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=x)\n # try address with borough code if present\n elif parsed['BOROUGH_CODE']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=parsed['BOROUGH_CODE'])\n # try address with zip code if present\n elif 
parsed['ZIP']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], zip=parsed['ZIP'])\n # validate and retrieve any addresses\n if len(self.similiar_names):\n for name in self.similiar_names:\n self._geocode(phn=parsed['PHN'], street=name['street'], borough_code=name['borough_code'])\n if None in self.results:\n self.results = list(filter(lambda v: v is not None, self.results))\n\n return self.results", "def address1(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address1\")", "def address_str(self) -> str | None:\n pass", "def _formatting_address_fields(self):\n return self._address_fields()", "def format_address(**args):\n #Begin with the organisation and PO Box number, if applicable.\n address = ''.join([args[entry] + '\\n' \n for entry in ['organisation', 'PO box']\n if args.get(entry)])\n #Format building name/number components.\n address += format_building_components(*[args.get(x) for x in \n ['sub-building name', \n 'building name', \n 'building number',\n 'concatenation indicator']])\n #Add thoroughfare (if present), locality/town and postcode.\n address += ''.join([args[entry] + '\\n' \n for entry in ['dependent thoroughfare', \n 'thoroughfare',\n 'double dependent locality',\n 'dependent locality',\n 'town',\n 'postcode']\n if args.get(entry)])\n return address.strip()", "def format_address(addresses: 'SequenceOrScalar[Address | str]') -> str:\n if isinstance(addresses, (Address, str)):\n return format_single_address(addresses)\n\n assert len(addresses) <= 50\n return ', '.join(format_single_address(a) for a in addresses)", "def _check_address(self):\n for object_ in self.objects:\n if object_.object_name.endswith(' ЕС'):\n if object_.object_address[:6].isnumeric():\n object_.object_address = \\\n object_.object_address[:7] + \\\n object_.object_fed_subj + ', ' + \\\n object_.object_address[7:]", "def get_address(query):\n address = \"Dis-moi, quel endroit tu cherches ?\"\n data = get_data(query)\n try:\n address_data = data[\"results\"][0][\"formatted_address\"]\n address = (\"Si je ne me trompe pas,\"\n \" l'adresse que tu cherche, c'est ... \" + address_data + \". 
Sinon\"\n \", dis-moi le nom de lieu exact\")\n except IndexError:\n address = \"Désolé, je n'ai pas compris quel endroit tu cherches ?\"\n finally:\n return address", "def _get_address(self, address1, address2):\n return f'{address1}\\n{address2}' if address2 else address1", "def readHouseAddresses():\n addressesRead = []\n with open(\"Files/HouseAddresses.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n details = line.split(\",\")\n address = []\n for detail in details:\n address.append(detail.rstrip('\\n').rstrip().lstrip())\n addressesRead.append(address)\n f.close()\n return addressesRead", "def nomad_address():\n\n print(nomad.get_address())", "def get_address(self, ):\n return self.get_parameter('address')", "def street(self):\n return self._street", "def maploc(loc):\n\n\n loc = REGEX['parens'].sub('', loc)\n loc = REGEX['and'].sub('', loc)\n loc = REGEX['num'].sub('', loc)\n\n \"\"\"\n 'parens' 'and' 'single' 'num' 'seeley' 'iab' 'brh'\n \"\"\"\n \"\"\"\n /* For non-street address, strip room numbers */\n if (!location.match(' Ave')) {\n location = location.replace(/LL[0-9]/g, '').replace(/[0-9]/g, '');\n }\n /* Some text substitutions */\n location = location.replace('Seeley W.', '').replace('International Affairs Building', '420 W 118th St').replace('Broadway Residence Hall', '2900 Broadway');\n\n \"\"\"\n return loc + ', New York, NY 10027'", "def parse_address_from_geocoding_response(geocoded_data: dict) -> str:\n return geocoded_data[\n 'response'][\n 'GeoObjectCollection'][\n 'featureMember'][0][\n 'GeoObject'][\n 'metaDataProperty'][\n 'GeocoderMetaData'][\n 'text']", "def test_get_order_address(self):\n pass", "def load_addresses():\n with open('addresses.txt') as f:\n return [address.strip() for address in f.readlines()]", "def do_getaddress(self,args):\n ppdict(bitstamp.get_depositaddress())", "def _addressitem_from_line(line):\n sline = line.split(\"\\t\")\n if len(sline) < 2:\n raise IOError(\"Error parsing address from line. Malformed data.\")\n address = sline[0]\n name = sline[1]\n\n if len(sline) > 2:\n otherinfo = sline[2]\n else:\n otherinfo = \"\"\n if len(sline) > 3:\n extrainfo = sline[3]\n else:\n extrainfo = \"\"\n if len(sline) > 4:\n raw_misc = sline[4:]\n misc = _raw_misc_to_dict(raw_misc)\n else:\n misc = {}\n\n return AddressItem(\n _email_address=address,\n _name=name,\n _otherinfo=otherinfo,\n _extrainfo=extrainfo,\n **misc\n )", "def _parse_location(self, response):\n loc_parts = [\n re.sub(r\"\\s+\", \" \", part).strip()\n for part in response.css(\n \"#contact-info .right-col-content .content *::text\"\n ).extract()\n if part.strip()\n ]\n return {\n \"name\": loc_parts[3],\n \"address\": \" \".join(loc_parts[4:]).replace(\" ,\", \",\").strip(),\n }" ]
[ "0.7285767", "0.7073066", "0.68373525", "0.6803627", "0.6702961", "0.66992515", "0.6645722", "0.6645722", "0.66008115", "0.6555697", "0.65275055", "0.64732426", "0.643964", "0.64220065", "0.6419511", "0.63993114", "0.63021076", "0.62145597", "0.6205807", "0.62011707", "0.61928886", "0.61835444", "0.61770445", "0.61426693", "0.61411965", "0.6136066", "0.6118628", "0.6103913", "0.60953367", "0.60859036", "0.60694325", "0.6049867", "0.6037654", "0.6006664", "0.5993662", "0.599296", "0.5984353", "0.59528565", "0.59359413", "0.59303993", "0.5898929", "0.58695227", "0.5835085", "0.5827997", "0.5818895", "0.58097136", "0.5797164", "0.57949907", "0.5775367", "0.5765253", "0.57513815", "0.57500416", "0.57481587", "0.5725516", "0.5725516", "0.5725516", "0.572122", "0.5713988", "0.5713988", "0.57049704", "0.5700431", "0.56987596", "0.56768906", "0.567477", "0.5666773", "0.5662513", "0.56544566", "0.563921", "0.56373787", "0.5634221", "0.5623397", "0.5621658", "0.5620116", "0.55916774", "0.5589411", "0.5588374", "0.5587982", "0.5580453", "0.55790925", "0.55727684", "0.55562145", "0.5553777", "0.5553763", "0.5546229", "0.5545192", "0.5538874", "0.55388194", "0.55354", "0.5528123", "0.5509381", "0.55091566", "0.55088073", "0.5508467", "0.54950005", "0.5482753", "0.5481104", "0.5467985", "0.5465495", "0.5461417", "0.5461046", "0.54587245" ]
0.0
-1
Given the input string, tries to separate them into "street" & "house number" variables
def filter_string(self):
    logger.info("Information Gathering Finished!")

    pattern_street = re.compile(r'[A-Za-z]+\s?\w+(?=\s[Nn]o\s\d+$) |'
                                r' [A-Za-z]+\s?\w+\s?[A-Za-z]+\s?[A-Za-z]+',
                                re.X)  # street pattern
    match_street = pattern_street.search(self.string)

    # If there are no house numbers provided in the input file,
    # print(not found) in the output JSON file
    numbers_instring = re.findall(r'\d+', self.string)  # digit counts in given string

    if len(numbers_instring) > 0:
        # In most cases we have: "no" followed by some digits
        pattern_housenumber = re.compile(r'(\d+\s?[A-Za-z]?$) |'
                                         r' (^\d+) |'
                                         r' [Nn]o+[\s?]+[0-9]+$',
                                         re.X)  # house number pattern
        match_housenumber = pattern_housenumber.search(self.string)
        fin_housenumber = match_housenumber[0]
    else:
        match_housenumber = ["not found"]

    fin_housenumber = match_housenumber[0]
    fin_street = match_street[0]
    print("street: ", fin_street)
    print("housenumber: ", fin_housenumber)
    return {'street': fin_street, 'housenumber': fin_housenumber}
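The document above is written as a method on a class that keeps the raw address in self.string and logs through a module-level logger. As a rough illustration only, the sketch below applies the same two regular expressions to a plain string outside that class; the function name split_street_and_number and the sample inputs are assumptions made for this example, not values taken from the dataset row.

import re

# Same patterns as in the document, lifted out of the method for a standalone demo.
STREET_RE = re.compile(r'[A-Za-z]+\s?\w+(?=\s[Nn]o\s\d+$) |'
                       r' [A-Za-z]+\s?\w+\s?[A-Za-z]+\s?[A-Za-z]+', re.X)
NUMBER_RE = re.compile(r'(\d+\s?[A-Za-z]?$) | (^\d+) | [Nn]o+[\s?]+[0-9]+$', re.X)

def split_street_and_number(raw):
    # Fall back to 'not found' when either pattern has no match,
    # mirroring the house-number fallback in the original method.
    street = STREET_RE.search(raw)
    number = NUMBER_RE.search(raw)
    return {'street': street[0] if street else 'not found',
            'housenumber': number[0] if number else 'not found'}

for sample in ('Winterallee 3', 'Musterstrasse 45', 'Auf der Vogelwiese 23 b'):
    # e.g. {'street': 'Winterallee', 'housenumber': '3'}
    print(split_street_and_number(sample))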
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber", "def parse_address_street(address_str, address_zip_us_re, address_zip_us_lax_re):\n address = {}\n errors = []\n parts = address_str.split('$')\n if DEBUG:\n address['debug_address_str'] = address_str\n address['debug_part_1'] = parts[0]\n address['debug_part_last'] = parts[-1]\n address['debug_length'] = len(parts)\n #if len(parts) == 1:\n #print('cannot split: {}: {}'.format(debug_type, address_str))\n match = re.search(address_zip_us_re, parts[-1])\n if match:\n if DEBUG:\n address['debug_parser'] = 'A'\n address['city'] = match.group(1)\n address['region'] = match.group(2).upper()\n address['postalCode'] = match.group(3)\n address['countryId'] = 'US'\n if len(parts) == 2:\n if DEBUG:\n address['debug_parser'] = 'B'\n address['addressLine1'] = parts[0]\n else:\n if len(parts) == 3:\n if DEBUG:\n address['debug_parser'] = 'C'\n address['addressLine1'] = parts[0]\n if parts[0] != parts[1]:\n if DEBUG:\n address['debug_parser'] = 'D'\n address['addressLine2'] = parts[1]\n else:\n match2 = re.search(address_zip_us_lax_re, address_str)\n if match2:\n if DEBUG:\n address['debug_parser'] = 'E'\n address['region'] = match2.group(2).upper()\n address['postalCode'] = match2.group(3)\n address['countryId'] = 'US'\n # FIXME: Cannot reliably parse the remainder for city and street address\n errors.append('Partial parse street address: {}'.format(address_str))\n address['addressLine1'] = match2.group(1)\n else:\n # This is the remainder that we could not parse.\n # So just put it all into \"addressLine1\" to be manually adjusted later.\n if DEBUG:\n address['debug_parser'] = 'F'\n errors.append('Cannot parse street address: {}'.format(address_str))\n address['addressLine1'] = address_str\n return (address, errors)", "def get_address(address: str) -> Tuple[str, str, str]:\n\n # 
Try to geocode the address as given\n g = geocoder.osm(address)\n\n if g.json is not None:\n\n # TODO this is inefficient and hacky\n\n # First thing we attempt if the result isn't complete is just to\n # add the housenumber (often the issue).\n if not good_geocoder_result(g.json):\n g.json['housenumber'] = usaddress.tag(address)[0]['AddressNumber']\n\n # If the result is now good, return it\n if good_geocoder_result(g.json):\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n\n # Geocoding was unsuccessful.\n # Let's try to create a cleaner address by first parsing out the pieces we need, then try again.\n \n # Parsing the address components...\n parsed, addr_type = usaddress.tag(address)\n if addr_type != \"Street Address\":\n raise ValueError(f\"Address could not be properly parsed. Resulting type: {addr_type}. Result: \\n{parsed}\")\n \n # Trim off any whitespace from the parsed components.\n for part in parsed:\n parsed[part] = parsed[part].strip()\n\n reqd_address_parts = ['AddressNumber', 'StreetName', 'PlaceName']\n if any(address_part not in parsed for address_part in reqd_address_parts):\n raise ValueError(f\"The address must have at least a house number, street, and city.\")\n \n # Initialize the resulting address string with the address number (aka house/street number)\n new_address = parsed['AddressNumber']\n \n # If the streetname is just a number, make it ordinal\n if parsed['StreetName'].isnumeric():\n parsed['StreetName'] = ordinal(parsed['StreetName'])\n \n # Get the whole street name\n for k, v in [(k, v) for k, v in parsed.items() if k.startswith(\"StreetName\")]:\n new_address += f\" {v}\"\n \n # Add the city...\n new_address += f\", {parsed['PlaceName']}\"\n # Add the state, if it exists\n if 'StateName' in parsed:\n new_address += f\", {parsed['StateName']}\"\n # And the zip code, if it exists\n if 'ZipCode' in parsed:\n new_address += f\" {parsed['ZipCode']}\"\n \n # Now try to geocode this improved address\n g = geocoder.osm(new_address)\n\n if g.json is not None:\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n \n # Still can't geocode the address. 
Throw an error\n else:\n raise ValueError(f\"Could not geocode this address: {address}\")", "def extract_unit_num_street(self, address):\n\n unit = street_num = street = None\n\n address_parts = self.clean(address).split()\n\n try:\n street_num = int(address_parts[1])\n street = address_parts[2]\n unit = int(address_parts[0])\n\n except IndexError:\n raise InvalidAddress(address)\n\n except ValueError:\n\n try:\n street_num = int(address_parts[0])\n except ValueError:\n # Not even a number\n raise InvalidAddress(address)\n\n street = address_parts[1]\n\n return (unit, street_num, street)", "def process_capture_groups(group):\n group = group[0][0].replace(',', '')\n group = group.split(' ')\n street_type = group[-1]\n street_name = ' '.join(group[:-1])\n if street_type in STREET_ABBREVIATION_TO_NAME.keys():\n street_type = STREET_ABBREVIATION_TO_NAME[street_type]\n return street_name, street_type", "def parse_address_campus(address_str, address_campus_re, address_campus_room_re):\n address = {}\n errors = []\n if '$' not in address_str:\n match = re.search(address_campus_room_re, address_str)\n if match:\n address['addressLine1'] = match.group(1)\n else:\n # This leftover is either an erroneous email address or a building name\n if '@' in address_str:\n errors.append('Campus address seems to be email: {}'.format(address_str))\n #FIXME: Should this be saved to addressLine1 anyway.\n else:\n # It seems to be a building address\n address['addressLine2'] = address_str\n else:\n match = re.search(address_campus_re, address_str)\n if match:\n address['addressLine2'] = match.group(1)\n address['addressLine1'] = match.group(2)\n #else:\n # FIXME: here just for debug\n #errors.append('Cannot parse campus address: {}'.format(address_str))\n return (address, errors)", "def parse_building_address(addr_string):\n addr_string = re.sub(_regexp, '', addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_dir, addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_suffix, addr_string)\n addr_string = re.sub(_regexp_extra_space, ' ', addr_string)\n return addr_string.strip().upper()", "def parse_streetname(self):\n index = self.index\n \n name = \"\"\n for i in range(4):\n if index + i == self.length:\n break\n if self.words[index+i]['word'] == ',':\n break\n # Hack\n if self.words[index+i]['word'] == 'doctor':\n self.words[index+i]['word'] = 'drive'\n break\n try:\n word = sttype[self.words[index+i]['word']]\n break\n except:\n try:\n word = vocab[self.words[index+i]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n break\n if name != '':\n name += ' ' + word['lemma'][0]\n else:\n name = word['lemma'][0]\n except: \n if self.words[index+i]['word'][-2:] in [ 'th', 'st', 'nd', 'rd' ]:\n name = self.words[index+i]['word'][:-2]\n else:\n self.index += i\n _dir, _n = self.parse_streetdir()\n self.index -= i\n if _dir:\n break\n if name != '':\n name += ' ' + self.words[index+i]['word']\n else:\n name = self.words[index+i]['word']\n \n if i == 0 or i == 4:\n return None, 0\n else:\n return name, i", "def address():\n # We start with generating the street name. 
For this we choose\n # between the most common prefixes and our own prefixes\n prefix = dice.randint(1, 100)\n if prefix <= 10: # 10%\n prefix = \"Haupt\"\n elif prefix <= 18: # 8%\n prefix = \"Schul\"\n elif prefix <= 25: # 7%\n prefix = \"Garten\"\n elif prefix <= 32: # 7%\n prefix = \"Dorf\"\n elif prefix <= 39: # 7%\n prefix = \"Bahnhof\"\n elif prefix <= 46: # 7%\n prefix = \"Wiesen\"\n elif prefix <= 52: # 6%\n prefix = \"Berg\"\n elif prefix <= 56: # 4%\n prefix = \"Kirch\"\n elif prefix <= 60: # 4%\n prefix = \"Wald\"\n elif prefix <= 64: # 4%\n prefix = \"Ring\"\n else:\n prefix = dice.choice(names.prefix)\n\n # Now we can add the suffix\n suffix = dice.randint(1, 100)\n if suffix <= 78:\n suffix = \"straße\"\n elif suffix <= 96:\n suffix = \"weg\"\n elif suffix <= 98:\n suffix = \"allee\"\n elif suffix == 99:\n suffix = \"ring\"\n elif suffix == 100:\n suffix = \"platz\"\n\n # When we have a city name as prefix, we need to capitalize the\n # suffix since it will be two words\n if prefix[-1] == \" \":\n suffix = suffix.capitalize()\n\n # Now we can add them together\n street = prefix + suffix\n\n # We need a house number as well. In Germany most numbers have\n # between one and four digits, so we will use this as base. Lower\n # numbers are more common, so we'll give it a 10% probability of\n # using 3 digits and 1% of using 4 digits\n digits = dice.randint(1, 100)\n if digits == 100:\n house_number = str(dice.randint(1000, 9999))\n elif digits >= 90:\n house_number = str(dice.randint(100, 999))\n else:\n house_number = str(dice.randint(1, 99))\n address_full = street + \" \" + house_number\n return address_full", "def street_address(full_address):\n full_address = normalise_address(full_address)\n capture_groups = ROAD_PATTERN.findall(full_address)\n if capture_groups:\n name, _type = process_capture_groups(capture_groups)\n return '{} {}'.format(name, _type)\n return ''", "def extract_state(full_address):\n full_address = full_address.strip()\n last_comma_index = full_address.rindex(\",\")\n last_space_index = full_address.rindex(\" \")\n state = full_address[last_comma_index + 1 : last_space_index]\n state = state.strip()\n return state", "def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue", "def maploc(loc):\n\n\n loc = REGEX['parens'].sub('', loc)\n loc = REGEX['and'].sub('', loc)\n loc = REGEX['num'].sub('', loc)\n\n \"\"\"\n 'parens' 'and' 'single' 'num' 'seeley' 'iab' 'brh'\n \"\"\"\n \"\"\"\n /* For non-street address, strip room numbers */\n if (!location.match(' Ave')) {\n location = location.replace(/LL[0-9]/g, '').replace(/[0-9]/g, '');\n }\n /* Some text substitutions */\n location = location.replace('Seeley W.', '').replace('International Affairs Building', '420 W 118th St').replace('Broadway Residence Hall', '2900 Broadway');\n\n \"\"\"\n return loc + ', New York, NY 10027'", "def parse_address(address: str) -> OrderedDict[str, str]:\n\n address = address.replace(\"\\n\", \", \")\n\n parsed_address, address_type = usaddress.tag(address)\n if address_type != \"Street Address\":\n logger.warning(\n f\"Couldn't parse address '{address}' of type {address_type}; best guess: {parsed_address}\"\n )\n\n # Fixup: At least one address has \"WI, USA\" in 
the \"StateName\" component.\n # Strip non-state components\n if parsed_address.get(\"StateName\"):\n parsed_address[\"StateName\"] = parsed_address[\"StateName\"].partition(\",\")[0]\n\n return parsed_address", "def parse_phone(s):\n pattern = '''\n ^\\s* # Leading spaces\n (?P<areacode>\n \\d{3}-? # \"xxx\" or \"xxx-\"\n | \\(\\d{3}\\)\\s* # OR \"(xxx) \"\n )\n (?P<prefix>\\d{3}) # xxx\n -? # Dash (optional)\n (?P<suffix>\\d{4}) # xxxx\n \\s*$ # Trailing spaces\n '''\n matcher = re.compile(pattern, re.VERBOSE)\n matches = matcher.match(s)\n if matches is None:\n print(s)\n return s\n else:\n areacode = re.search('\\d{3}', matches.group ('areacode')).group()\n prefix = matches.group ('prefix')\n suffix = matches.group ('suffix')\n return areacode+'-'+prefix+'-'+suffix", "def find_street(self, text):\n text = text.replace('\"', '')\n result = ''\n textarr = text.split()\n for i in range(0, len(textarr)):\n if textarr[i] in self.streets:\n # decide if the result is nearly empty or not to add the word with or without a space\n if len(result) < 2:\n result = result + textarr[i]\n else:\n result = result + ' ' + textarr[i]\n\n if result in self.streets:\n\n # ends the function if the street is found\n if (result + '\\n') in self.streets and result != 'straße' and result != 'in' and result != '' \\\n and result != 'ein' and result != 'der' and result != 'und' and result != ' ' \\\n and result != 'an':\n return result\n else:\n result_list = result.split()\n del result_list[0]\n result = ' '.join(w for w in result_list)\n result = result.lstrip(' ')\n # ends the function if the street is found\n if (result + '\\n') in self.streets and result != 'straße' and result != 'in' and result != '' \\\n and result != 'ein' and result != 'der' and result != 'und' and result != ' ' \\\n and result != 'ist' and result != 'an':\n return result\n elif i == len(textarr)-1:\n result_list = result.split()\n del result_list[0]\n result = ' '.join(w for w in result_list).lstrip(' ')\n if (result + '\\n') in self.streets and result != 'straße' and result != 'in' and result != '' \\\n and result != 'ein' and result != 'der' and result != 'und' and result != ' ' \\\n and result != 'ist' and result != 'an':\n return result\n print(result)\n return None", "def extract_city(full_address):\n full_address = full_address.strip()\n last_comma_index = full_address.rindex(\",\")\n mid_comma_index = full_address.rindex(\",\", 0, last_comma_index)\n city = full_address[mid_comma_index + 1 : last_comma_index]\n city = city.strip()\n return city", "def split_address(address):\n if '://' in address:\n protocol, address = address.split('://')\n else:\n protocol = 'http'\n\n if ':' in address:\n address, port = address.split(':')\n else:\n port = 443 if protocol == 'https' else 8000\n\n return protocol, address, int(port)", "def parse_place_notation(input_string: str) -> Tuple[int, str]:\n\n # Looking for a string that matches <stage>:<place notation> where the\n # place notation is a series of bell numbers and 'x' characters\n parts = input_string.split(\":\")\n if len(parts) == 2:\n stage_part = parts[0]\n if len(stage_part) == 0 or not stage_part.isnumeric():\n raise PlaceNotationError(input_string, \"Stage must be a number\")\n stage = int(stage_part)\n place_notation = parts[1]\n if not valid_pn(place_notation):\n raise PlaceNotationError(input_string, \"Place notation is invalid\")\n else:\n raise PlaceNotationError(input_string, \"<stage>:<place notation> required\")\n\n return stage, place_notation", "def 
extract_postcode(s):\n pc_regex = r'([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y]'\n pc_regex += r'[0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]'\n pc_regex += r'))))\\s?[0-9][A-Za-z]{2})'\n\n re_search = re.search(pc_regex, s)\n if re_search:\n p = re_search.group(0)\n else:\n p = ''\n return p", "def search_address(query: str) -> Tuple[int, str]:\n\n url = 'https://api.n1.ru/api/v1/geo/geocoder/with_cities/'\n params = _search_params.copy()\n params['q'] = query\n\n try:\n r = requests.get(url, params=params, headers=_headers)\n response = r.json()\n\n if not 'result' in response or not response['result']:\n raise NotFoundException('Result not found or empty.')\n \n address = None\n house_number = query.split(',')[-1].strip()\n for x in response['result']:\n if x['name_ru'].lower() == house_number:\n address = x\n break\n \n if address is None:\n raise NotFoundException(f'Not found house number {house_number} in result: {response[\"result\"]}')\n \n return address['street']['id'], address['name_ru']\n except requests.RequestException as e:\n raise ParserException(f'Fail make request. query: {query}') from e\n except NotFoundException as e:\n raise ParserException('Invalid result.') from e\n except (KeyError, IndexError) as e:\n raise ParserException(f'Fail get street id or house number. value: {response[\"result\"]}') from e", "def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] = r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p", "def _get_address(self, soup):\n street, city, state, zipcode = None, None, None, None\n try:\n # property detail tag\n street = soup.find('div', class_='main-address').get_text().strip()\n # find address tag\n address = soup.find('div', class_='c-address')\n \n # pattern for the address in this website\n locality = address.find_all('span', class_='locality')\n city = locality[0].get_text().strip()\n if len(locality) > 1:\n city = locality[1].get_text().strip()\n state = address.find('span', class_='region').get_text().strip()\n zipcode = address.find('span', class_='postal-code').get_text().strip()\n return street, city, state, zipcode\n except:\n return street, city, state, zipcode", "def extract_zipcode(full_address):\n full_address = full_address.strip()\n last_space_index = full_address.rindex(\" \")\n zipcode = full_address[last_space_index + 1 : ]\n return zipcode", "def func2():\n\n\tnums = '''\n\t\t800-555-1212\n\t\t800 555 1212\n\t\t800.555.1212\n\t\t(800) 555-1212\n\t\t1-800-555-1212\n\t\t800-555-1212-1234\n\t\t800-555-1212x1234\n\t\t800-555-1212 ext. 
1234\n\t\twork 1-(800) 555.1212 #1234\n\t'''\n\n\tpattern = r\"(\\d{3})\\D*(\\d{3})\\D*(\\d{4})\\D*(\\d*)$\"\n\tregex = re.compile(pattern)\n\n\tprint regex.search(\"800-555-1212\").groups()\n\tprint regex.search(\"800 555 1212\").groups()\n\tprint regex.search(\"800.555.1212\").groups()\n\n\tprint regex.search(\"(800) 555-1212\").groups()\n\tprint regex.search(\"1-800-555-1212\").groups()\n\n\tprint regex.search(\"800-555-1212-1234\").groups()\n\tprint regex.search(\"800-555-1212x1234\").groups()\n\n\tprint regex.search(\"800-555-1212 ext. 1234\").groups()\n\tprint regex.search(\"work 1-(800) 555.1212 #1234\").groups()", "def split_well_name (well_name):\n\n letters = well_name.rstrip('0123456789')\n\n nums = well_name.lstrip(letters)\n\n\n #Do some checks to make sure it's a well name in for the format letter-letter-#-#\n if len(nums) == 0:\n raise ValueError('Something is wrong with your input, I cannot find a row number')\n\n\n for i in '0123456789':\n if i in letters:\n raise ValueError('Something is wrong with your input, I think there is a number in your column letter.')\n\n for j in nums:\n if j not in '0123456789':\n raise ValueError('Something is wrong with your input, I think there is a letter in your row number.')\n\n return letters, nums", "def street_address():\r\n\r\n return _random.choice(\r\n [\r\n '%d-%d %s' % (\r\n _random.randrange(999),\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%d %s' % (\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%s %d, %s' % (\r\n 'P.O. Box',\r\n _random.randrange(999),\r\n street_name()\r\n )\r\n ]\r\n )", "def _get_address(self, soup):\n\n try:\n # from the content tag, extract the tag that contains all the address info\n address_tag = soup.find('div', class_='flex flex-col md:flex-row')\n # street tag\n street_tag = address_tag.find('h1', class_='h3')\n # street information\n street = street_tag.get_text()\\\n .strip()\\\n .replace(',', '')\n # region tag \n region_tag = address_tag.find('h5', class_='listing-card-location') \\\n .get_text() \\\n .strip() \\\n .split(' ')\n # city information\n city = region_tag[0].replace(',', '').title()\n # state information\n state = region_tag[1]\n # zipcode information\n zipcode = region_tag[2]\n\n return street, city, state, zipcode\n \n except:\n # return None if any of the above parts failed\n # if there's any part that's missing in the address part,\n # the whole address becomes useless\n return None, None, None, None", "def test_address(residence):\n # We start by creating our result list\n result = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n # Then we split our given string so we only have the address left\n residence = residence.split(\", \")[1]\n\n # First we check for the prefix and increase the according value\n if \"Haupt\" in residence:\n result[0] = 1\n elif \"Schul\" in residence:\n result[1] = 1\n elif \"Garten\" in residence:\n result[2] = 1\n elif \"Dorf\" in residence:\n result[3] = 1\n elif \"Bahnhof\" in residence:\n result[4] = 1\n elif \"Wiesen\" in residence:\n result[5] = 1\n elif \"Berg\" in residence and residence[4] in (\"s\", \"w\", \"a\", \"r\", \"p\"):\n result[6] = 1\n elif \"Kirch\" in residence:\n result[7] = 1\n elif \"Wald\" in residence:\n result[8] = 1\n elif \"Ring\" == residence[0:4]:\n result[9] = 1\n else:\n result[10] = 1\n\n # Now we check the suffix\n if \"straße\" in residence or \"Straße\" in residence:\n result[11] = 1\n elif \"Weg\" in residence or \"weg\" in residence:\n result[12] = 1\n elif \"Allee\" in 
residence or \"allee\" in residence:\n result[13] = 1\n elif \"platz\" in residence or \"platz\" in residence:\n result[15] = 1\n else:\n result[14] = 1\n\n # And now we check the number\n number = int(residence.split()[-1])\n if number > 999:\n result[16] = 1\n elif number > 99:\n result[17] = 1\n else:\n result[18] = 1\n\n return result", "def extract_alleles_from_snp_string(snp_string):\n (allele1, allele2) = snp_string[1:4].split(\"/\")\n assert allele1 in \"ATGC\"\n assert allele2 in \"ATGC\"\n return allele1, allele2", "def parse_address(address, sanity=True):\n address = address.split(':')\n address, port = ':'.join(address[:-1]), address[-1]\n\n guessed_type = 4\n if address.startswith('['):\n address = address[1:]\n guessed_type = 6\n if address.endswith(']') or (sanity and guessed_type == 6):\n if sanity:\n assert address.endswith(']')\n address = address[:-1]\n guessed_type = 6\n if address.count(':') > 3:\n if sanity:\n assert guessed_type == 6\n guessed_type = 6\n\n return address, int(port), guessed_type", "def parse_location(location):\n city, state = location.strip().split(',')\n return f\"{city.strip().replace(' ', '-')}-{state.strip().replace(' ', '-')}\"", "def _split_proxy_info(data: str) -> list:\n \n country = data[:2]\n anonymity = data[3:4]\n type_ = data[4:].strip('-+ ') # Remove splitting (- and space) and google_passed flag (+)\n google_passed = data[-1]\n\n return [country, anonymity, type_, google_passed]", "def _parse_user_input(self):\n user_input = self.user_input.strip()\n if user_input:\n if user_input.find(',') > -1:\n # Location is either city/state or latitude/longitude.\n if user_input[0].isalpha():\n # City, state (lat/long handled elsewhere)\n city, state = [x.strip() for x in user_input.split(',')]\n self.city = city\n self.state = state\n elif (len(user_input) <= 10 and\n user_input[1].isdigit()): # 2nd char in US/Can. 
postal codes\n # Postal code\n self.postal_code = user_input.strip()", "def normalise_address(address):\n return re.sub('\\s+', ' ', str(address).upper()).replace(' ,', ',')", "def return_street(streetname):\r\n if streetname == None:\r\n return streetname\r\n if streetname.split(\" \")[-1] in valid_suffix:\r\n return \" \".join(str(streetname).split(\" \")[:-1])\r\n\r\n return streetname", "def parse_streetdir(self):\n \n first = self.words[self.index]['word']\n if self.index + 1 < self.length:\n second = self.words[self.index+1]['word']\n else:\n second = None\n \n if first in ['northwest', 'northeast', 'southwest', 'southeast']:\n return first, 1 \n elif first == 'nw':\n return \"northwest\", 1\n elif first == 'ne':\n return \"northeast\", 1\n elif first == 'sw':\n return \"southwest\", 1\n elif first == 'se':\n return \"southeast\", 1\n \n if first in ['n', 'north']:\n if second in ['w', 'west']:\n return \"northwest\", 2\n elif second in ['e', 'east']:\n return \"northeast\", 2\n else:\n return \"north\", 1\n elif first in ['s', 'south']:\n if second in ['w', 'west']:\n return \"southwest\", 2\n elif second in ['e', 'east']:\n return \"southeast\", 2\n else:\n return \"south\", 1\n elif first in ['e', 'east']:\n return \"east\", 1\n elif first in ['w', 'west']:\n return \"west\", 1\n \n return None,0", "def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2", "def parse_proasis(input_string):\n return (\n input_string[:3].strip(),\n int(input_string[5:].strip()),\n input_string[3:5].strip(),\n )", "def parse_input(input: str) -> Tuple[int, int, int, int]:\n head, tail = input.split(\"@ \")\n points, measures = tail.split(\": \")\n x, y = points.split(\",\")\n w, h = measures.split(\"x\")\n return int(x), int(y), int(w), int(h)", "def parse_station_name (station_name):\n try:\n _,chinese_name,code,full_pinyin,short_pinyin = station_name.split('|')\n except ValueError:\n # print(station_name)\n _,chinese_name,code,full_pinyin,short_pinyin,_ = station_name.split('|')\n return {chinese_name:code,full_pinyin:code,short_pinyin:code}", "def _get_address(self, address_tag, hdr):\n\n # try to find all the span tags in the address tag, the span tags\n # include all the address information we need \n try:\n elements = address_tag.find_all('span')\n\n # scrape the text out of the span tags and remove\n # all the whitespaces and punctuation marks\n address = elements[0].get_text()\\\n .replace(',','')\\\n .strip()\n city = elements[1].get_text().strip()\n state = elements[2].get_text().strip()\n zipcode = elements[3].get_text().strip()\n return address, city, state, zipcode\n # however, sometimes the address tag does not include the street\n # info, in this case, use the text in the header tag, which serves\n # as a replacement for the address \n except:\n address = hdr.get_text()\n elements = address_tag.find_all('span')\n city = elements[0].get_text()\\\n .replace(',','')\\\n .strip()\n state = elements[1].get_text().strip()\n zipcode = elements[2].get_text().strip()\n return address, city, state, zipcode", "def __init__(self, zip_code, house_number, house_addition=\"\"):\n self.zip_code = zip_code.replace(' ', '')\n self.house_number = house_number.strip()\n self.house_addition = 
house_addition.strip()", "def get_string_info(string):\n line_count = 1\n column_count = 1\n for char in string:\n if char == '\\n':\n column_count = 1\n line_count += 1\n else:\n column_count += 1\n return Coords(line_count, column_count, len(string))", "def _split_region(region: str) -> Tuple[bool, str, Optional[str]]:\n parts = region.lower().split(\"-\")\n\n if parts[0] == \"vip\":\n del parts[0]\n vip = True\n else:\n vip = False\n\n country = parts.pop(0)\n\n try:\n country_part = parts.pop(0)\n except IndexError:\n country_part = None\n\n return vip, country, country_part", "def normalize_address(address):\n # Fix 'Place/Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]/[a-zA-Z0-9]', address):\n address = address.replace('/', ' & ')\n # Fix 'Place:Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]:[a-zA-Z0-9]', address):\n address = address.replace(':', ' & ')\n # Fix 'RD' -> 'Rd' & 'PK' -> 'Pk'\n if re.findall(r'[PRSA][KDTV]', address):\n address = re.sub(r'([PRSA][KDTV])', \\\n lambda x: x.group(0).title(), address)\n # Fix 'Bl' -> 'Blvd'\n if re.findall(r'(Bl)[\\ ]', address):\n address = address.replace('Bl', 'Blvd')\n # Fix 'w 156th' -> 'W 156th'\n if re.findall(r'[^a-zA-Z][wnse][/ ]', address):\n address = re.sub(r'[^a-zA-Z]([wnse])[/ ]', \\\n lambda x: x.group(0).upper(), address)\n # Fix '151 St' -> '151st St'\n if re.findall(r'[0-9][\\ ][SA][tv]', address):\n address = re.sub(r'[0-9]+', \\\n ordinal_conversion, address)\n return address", "def net_xy(street):\r\n\r\n # api-endpoint\r\n URL = \"https://ags.govmap.gov.il/Search/FreeSearch\"\r\n # headers\r\n headers = {\"Content-Type\": \"application/json\", \"charset\": \"utf-8\"}\r\n # location given here\r\n try:\r\n p = \"{\\\"keyword\\\": \\\"\" + street + \"\\\",\\\"LstResult\\\": null}\"\r\n PARAMS = p.encode(\"utf-8\")\r\n\r\n # sending get request and saving the response as response object\r\n r = requests.post(url=URL, data=PARAMS, headers=headers)\r\n\r\n # extracting data in json format\r\n data = r.json()\r\n\r\n # extracting latitude, longitude and formatted address\r\n # of the first matching location\r\n\r\n X = data['data']['Result'][0]['X']\r\n Y = data['data']['Result'][0]['Y']\r\n except Exception as e:\r\n print(e)\r\n # print('exception ddamammnnnnn')\r\n print(street)\r\n return 0,0\r\n return X,Y", "def extract_address(input_string):\n if input_string:\n addr_search = re.search(\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(?:/\\d{1,2}|)',\n input_string)\n\n if bool(addr_search):\n return addr_search.group()\n\n return None", "def split_address(address: str, prefix=\"address_\", max_lines=5) -> dict[str, str]:\n\n # TODO ICMSLST-1692: ILB to fix addresses which are more than 5 lines\n return {\n f\"{prefix}{i}\": address_line.strip()\n for i, address_line in enumerate(address.split(\"\\n\"), start=1)\n if address_line and address_line.strip() and i <= max_lines\n }", "def seperate_City_State_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n v = list(dictionary.values())\n values = []\n res = []\n for i in range(len(keys)):\n state = tmp[i][1].strip()\n city = tmp[i][0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append((state, city))\n values.append(v[i])\n return res, list(values)", "def parse_feature_value(s,next_index=0):\n next_index = jump_over_space(s,next_index)\n start_index = next_index\n while True:\n if not s[next_index].isspace():\n next_index += 1\n else:\n break\n feature_value = 
s[start_index:next_index]\n if feature_value == '':\n feature_value = None\n feature_value = feature_value.split('/')\n return (feature_value,next_index)", "def process_address(text):\n return sanitize(text[9:])", "def canon_station_name(s, line):\n s = s.strip()\n s = re.sub('^Heathrow$', 'Heathrow Terminals 1, 2, 3', s)\n s = re.sub('^Olympia$', 'Kensington (Olympia)', s)\n s = re.sub('^Warwick Ave$', 'Warwick Avenue', s)\n s = re.sub('^Camden$', 'Camden Town', s)\n s = re.sub('^Central$', 'Finchley Central', s) # They say \"Between Central and East Finchley\"\n s = re.sub('\\s*Platform \\d$', '', s)\n s = s + ' Station'\n s = s.replace('(Bakerloo)', 'Bakerloo').replace('Earls', 'Earl\\'s') \\\n .replace(' fast ', ' ') \\\n .replace('\\xe2\\x80\\x99', \"'\") \\\n .replace('St ', 'St. ') \\\n .replace('Elephant and Castle', 'Elephant &amp; Castle') \\\n .replace('Lambeth Station', 'Lambeth North Station') \\\n .replace('Chalfont Station', 'Chalfont &amp; Latimer Station') \\\n .replace('West Brompon', 'West Brompton') \\\n .replace('Picadilly Circus', 'Piccadilly Circus') \\\n .replace('High Barent', 'High Barnet') \\\n .replace('Bartnet', 'Barnet') \\\n .replace('Faringdon', 'Farringdon') \\\n .replace('Turnham Greens', 'Turnham Green') \\\n .replace('Ruilsip', 'Ruislip') \\\n .replace('Dagemham', 'Dagenham') \\\n .replace('Edgware Road (H &amp; C)', 'Edgware Road Circle') \\\n .replace('Hammersmith (Circle and H&amp;C)', 'Hammersmith') \\\n .replace('Shepherds Bush (Central Line)', \"Shepherd's Bush\") \\\n .replace('Terminals 123', 'Terminals 1, 2, 3').replace('Terminal 1,2,3', 'Terminals 1, 2, 3') \\\n .replace('Woodford Junction', 'Woodford') \\\n .replace(\"King's Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace(\"Kings Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace('Central Finchley', 'Finchley Central').replace('District and Picc', 'D &amp; P') \\\n .replace('South Fields', 'Southfields') \\\n .replace('Regents Park', \"Regent's Park\") \\\n .replace('Bromley-by-Bow', \"Bromley-By-Bow\") \\\n .replace('Brent Oak', 'Burnt Oak') \\\n .replace('St. Johns Wood', \"St. 
John's Wood\") \\\n .replace('Totteridge and Whetstone', 'Totteridge &amp; Whetstone') \\\n .replace('Newbury Park Loop', 'Newbury Park') \\\n .replace('Harrow-on-the-Hill', 'Harrow on the Hill')\n if s == 'Edgware Road Station' and line == 'B':\n s = 'Edgware Road Bakerloo Station'\n if s == 'Edgware Road Station' and line != 'B':\n s = 'Edgware Road Circle Station'\n return s", "def get_by_altitute():\n try:\n lat, long = input(\"Enter Lat and Long of a place: \").split()\n return lat, long\n except ValueError:\n print(\"Error while getting values.\")\n get_by_altitute()", "def extract_addresses(addresses):\n \n # Since lists are iterated over in Python in an orderly fashion, \n # put 'Input Reg' before 'Input' such that the 'Reg' in an address \n # with an 'Input Reg' prefix doesn't get left behind\n address_prefixes= [\"Input Reg \", \"Holding Reg \", \"Input \", \"Coil \"]\n\n \n for idx,address in enumerate(addresses):\n # Replace prefixes with empty string\n for prefix in address_prefixes:\n addresses[idx]=addresses[idx].replace(prefix, \"\")\n # Extract numeral\n try:\n addresses[idx]= int(addresses[idx])\n except:\n logging.warning(\"Invalid modbus address suppied at index {}\".format(idx))\n\n # Return\n return addresses", "def clean_address(self, s):\n # The letter \"O\" instead of the numeral \"0\" is a common mistake.\n s = re.sub(\n r\"\\b[A-Z][O0-9][A-Z]\\s?[O0-9][A-Z][O0-9]\\b\", lambda x: x.group(0).replace(\"O\", \"0\"), clean_string(s)\n )\n for k, v in province_or_territory_abbreviations().items():\n # Replace a province/territory name with its abbreviation.\n s = re.sub(\n r\"[,\\n ]+\"\n r\"\\(?\" + k + r\"\\)?\"\n r\"(?=(?:[,\\n ]+Canada)?(?:[,\\n ]+[A-Z][0-9][A-Z]\\s?[0-9][A-Z][0-9])?\\Z)\",\n \" \" + v,\n s,\n )\n # Add spaces between province/territory abbreviation, FSA and LDU and remove \"Canada\".\n return re.sub(\n r\"[,\\n ]+\" r\"([A-Z]{2})\" r\"(?:[,\\n ]+Canada)?\" r\"[,\\n ]+([A-Z][0-9][A-Z])\\s?([0-9][A-Z][0-9])\" r\"\\Z\",\n r\" \\1 \\2 \\3\",\n s,\n )", "def get_str_address(address):\n return \\\n get_ob_value_primitive(address, 'AddrLine1', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine2', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine3', exception_return_value='') + ', ' + \\\n get_ob_value_primitive(address, 'City', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'County', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'StateProvince', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'ZipPostalCode', exception_return_value='')", "def _parse_info(form) :\n w = 0\n n = 0\n nh = 0\n for part in re.findall(\"[A-Z]+[0-9]+\",form):\n m = re.match(\"([A-Z]+)([0-9]+)\",part)\n element = m.group(1)\n number = int(m.group(2))\n w += mass[element.capitalize()]*number\n n += number\n if element != \"H\" : nh += number\n return w,n,nh", "def _parse_info(form) :\n w = 0\n n = 0\n nh = 0\n for part in re.findall(\"[A-Z]+[0-9]+\",form):\n m = re.match(\"([A-Z]+)([0-9]+)\",part)\n element = m.group(1)\n number = int(m.group(2))\n w += mass[element.capitalize()]*number\n n += number\n if element != \"H\" : nh += number\n return w,n,nh", "def _cleanupAddress(self, address):\n clean = []\n \n # This is sort of a desultory effort but I'm not convinced \n # that these cleanups will actually result in cleaner searches\n for word in address.split(None):\n lower = word.lower()\n \n # Some things we just nuke\n if lower == 'at': 
continue\n elif lower == 'btw': continue\n elif lower == 'btwn': continue\n elif word.isdigit(): continue\n \n # Or we make substitiutions\n elif lower == 'st' or lower == 'st.':\n word = 'Street'\n elif lower == 'ave' or lower == 'ave.':\n word = 'Avenue'\n elif lower == 'pl' or lower == 'pl.':\n word = 'Place'\n elif lower == 'n': word = 'North'\n elif lower == 'e': word = 'East'\n elif lower == 's': word = 'South'\n elif lower == 'w': word = 'West'\n \n clean.append(word)\n return ' '.join(clean)", "def part(string):\n characters = [] \n count = 0\n st = \"\"\n for char in string:\n if char == \" \":\n characters.append(\"space\")\n elif char ==\"?\" or char == \",\" or char == \";\" or char == \":\":\n characters.append(char)\n count = 0\n else:\n # st += char\n # count += 1\n characters.append(char)\n \n if count == 3:\n characters.append(st)\n st = \"\"\n count = 0\n return characters", "def check_born_place(input_string: str) -> tuple:\n c = input_string[0]\n if c == 'A':\n return 'Taipei City', 10\n elif c == 'B':\n return 'Taichung City', 11\n elif c == 'C':\n return 'Keelung City', 12\n elif c == 'D':\n return 'Tainan City', 13\n elif c == 'E':\n return 'Kaohsiung City', 14\n elif c == 'F':\n return 'New Taipei City', 15\n elif c == 'G':\n return 'Yilan County', 16\n elif c == 'H':\n return 'Taoyuan City', 17\n elif c == 'I':\n return 'Chiayi City', 34\n elif c == 'J':\n return 'Hsinchu County', 18\n elif c == 'K':\n return 'Miaoli County', 19\n elif c == 'L':\n return 'Taichung County', 20\n elif c == 'M':\n return 'Nantou County', 21\n elif c == 'N':\n return 'Changhua County', 22\n elif c == 'O':\n return 'Hsinchu City', 35\n elif c == 'P':\n return 'Yunlin County', 23\n elif c == 'Q':\n return 'Chiayi County', 24\n elif c == 'R':\n return 'Tainan County', 25\n elif c == 'S':\n return 'Kaohsiung County', 26\n elif c == 'T':\n return 'Pingtung County', 27\n elif c == 'U':\n return 'Hualien County', 28\n elif c == 'V':\n return 'Taitung County', 29\n elif c == 'W':\n return 'Kinmen County', 32\n elif c == 'X':\n return 'Penghu County', 30\n elif c == 'Y':\n return 'Yangmingshan Management Bureau', 31\n elif c == 'Z':\n return 'Lienchiang County', 33\n else:\n # Should not happen\n return None, None # The return value is a tuple containing two values", "def handle_special_cases(substring):\r\n if substring==['', 'EB/NB', '2'] or substring==['EB/NB', '2', ''] or substring==['', 'EB/NB', '2', '#1'] or substring==['', 'EB/NB', '2', '#2']:\r\n dir='E'\r\n road=2\r\n elif substring== ['', 'WB/SB', '2'] or substring==['WB/SB', '2', '']:\r\n dir='W'\r\n road=2\r\n elif substring==['WB210', '']:\r\n dir='W'\r\n road=210\r\n elif substring==['S', '605/W', '10', '']:\r\n dir='W'\r\n road=10 \r\n elif substring==['', 'NB5', 'TRK', 'RTE']:\r\n dir='N'\r\n road=5\r\n elif substring==['', 'S605/E10']:\r\n dir='E'\r\n road=10\r\n else:\r\n dir=None\r\n road=0\r\n return dir, road", "def seperate_City_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n city = elem[0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append(city)\n return res, list(values)", "def phone_parser(phone, mode='PL'):\n\n if not phone:\n raise WrongInput(\"Input cannot be blank\")\n if not isinstance(phone, str):\n raise WrongInput(\"Invalid phone format\")\n\n if mode == 'PL':\n gsm_prefixes = ['50', '51', '53', '57', '60', '66', '69', '72', '73', 
'78', '79', '88']\n if phone[:2] in gsm_prefixes:\n phone_pattern = re.compile(r'''\n # don't match beginning of string\n (\\d{0,2}) # area code of 2 digits (e.g. '42')\n \\D* # optional separator\n (\\d{3}\\D*\\d{3}\\D*\\d{3}) # rest of number - divide into 3 3-digit sequences with optional separators\n # (e.g. '605-789-567')\n $ # end of string\n ''', re.VERBOSE)\n else:\n phone_pattern = re.compile(r'''\n # don't match beginning of string\n (\\d{0,2}) # area code of 2 digits (e.g. '42')\n \\D* # optional separator\n (\\d{3}\\D*\\d{2}\\D*\\d{2}) # rest of number - divide into 3 2-digit sequences with optional separators\n # (e.g. '605-78-56')\n $ # end of string\n ''', re.VERBOSE)\n else:\n phone_pattern = re.compile(r'''\n # don't match the beginning of the string\n (\\d{3}) # area code of 3 digits (e.g. '800')\n \\D* # optional separator\n (\\d{3}\\D*\\d{4}\\D*\\d+) # rest of number - divide into 3 sequences with optional separators: two obligatory\n # with 3 and 4 digits, one optional with any number of digits\n $ # end of string\n ''', re.VERBOSE)\n if not re.search(phone_pattern, phone):\n raise WrongInput(\"Invalid phone format.\")\n\n phone_obj = phone_pattern.search(phone)\n phone_area, phone_num = phone_obj.groups()\n phone = re.sub(r'\\D', '', phone_num)\n return phone, phone_area, phone_num", "def __process_address(self, address: Tuple[int, int, int, int, int]) -> Dict[str, int]:\n return {\n 'interface': address[0],\n 'protocol': address[1],\n 'type': address[2],\n 'hardware_type': address[3],\n 'address': address[4],\n }", "def parse_name(first_name, last_name):\n\n return first_name + \" \" + last_name", "def get_address():\r\n address = input(\"What is the customer's address?: \")\r\n\r\n return address", "def replace_street(street):\r\n if isinstance(street, str):\r\n for rep in replacements:\r\n street = re.sub(rep, \"\", street)\r\n\r\n streetint = re.findall(r'\\d+', str(street))\r\n if len(streetint) > 0 and int(streetint[0]) < 100:\r\n street = int(streetint[0])\r\n\r\n if street < 10:\r\n street = '0' + str(street) + str(streetnums[str(street)])\r\n elif street < 14:\r\n street = str(street) + 'TH'\r\n else:\r\n street = str(street) + str(streetnums[str(street)[-1]])\r\n\r\n\r\n return street", "def number(full_address):\n warning_message = \"\"\"\\n\n This parser should be used with the knowledge that this\n function is open to four significant vulnerabilities:\n 1) `number()` will parse the first numeric characters it\n an address string contains (read from left to right).\n If the address string has:\n a) no building number\n b) numeric characters unrelated to addressable\n information at the start of the address string\n 2) Address numbers separated by `&` or `,` will not be parsed\n 3) Building names that include numeric characters are\n incorrectly parsed as building numbers\\n\n \"\"\"\n warnings.warn(warning_message)\n return capture_address_element(NUMBER_PATTERN, full_address)", "def clean_street(self):\n street = self.cleaned_data['street'].strip().title()\n street = re.sub(r'\\bRoad\\b', 'Rd', street)\n street = re.sub(r'\\bStreet\\b', 'Str', street)\n street = re.sub(r'\\bAvenue\\b', 'Ave', street)\n street = re.sub(r'\\bParkway\\b', 'Pkwy', street)\n street = re.sub(r'\\bSuite\\b', 'Ste', street)\n street = re.sub(r'\\bApartment\\b', 'Apt', street)\n street = re.sub(r'\\s+', ' ', street) # Remove runs of spaces\n return street", "def split_str(str):\n \n logger = logging.getLogger(__name__)\n \n logger.debug('{0}'.format(str))\n \n match = 
re.match(r\"([0-9]+.?\\d{0,32}?)(d|m|s)\", str)\n \n if match:\n items = match.groups()\n \n return items[0], items[1]", "def parse_puzzle(puzzle):\n puzzle = re.sub(\"\\sGrid \\d{2}\",\"\", sample)\n puzzle = puzzle.strip().split(\"\\n\") \n return puzzle", "def return_intersections(streetname):\r\n if streetname != None and isinstance(streetname, str) and ' AND ' in streetname:\r\n streetnames = streetname.split(' AND ')\r\n df = streetintersections[(streetintersections.Street == streetnames[0]) \\\r\n & (streetintersections.Isection == streetnames[1])]\r\n if df.shape[0] > 0:\r\n return str(int(df['Number'].iloc[0])) + ' ' + df['Street'].iloc[0] + ' ' + df['Suffix'].iloc[0]\r\n return None", "def mysplit(string):\n result = []\n last_split = 0\n for i in range(len(string)-3):\n if( string[i] == \"a\" and\n string[i+1] == \"n\" and\n string[i+2] == \"d\"):\n partial = string[last_split:i]\n last_split = i+3\n result.append(partial)\n rest = string[last_split:]\n result.append(rest)\n return result", "def extract(self, str):\n\n ips = re.match( r'^[0-9]+(?:\\.[0-9]+){3}', str)\n\n if ips:\n return ips.group(0)", "def capture_address_element(regex_object, full_address):\n full_address = normalise_address(full_address)\n capture_groups = regex_object.search(full_address)\n if capture_groups:\n return capture_groups.group(0)\n return ''", "def parse_info(s:str) -> dict:\n d = {}\n d[\"SVTYPE\"] = re.search(r'(?<=SVTYPE=)\\w+',s).group(0)\n d[\"SUPPORT\"] = re.search(r'(?<=SUPPORT=)\\d+',s).group(0)\n if d[\"SVTYPE\"] in [\"BND\"]:\n return d\n d[\"END\"] = re.search(r'(?<=END=)\\d+',s).group(0)\n if d[\"SVTYPE\"] in [\"INV\"]:\n return d\n d[\"SVLEN\"] = re.search(r'(?<=SVLEN=)(.*?)(?=;)',s).group(0)\n d[\"READS\"] = re.search(r'(?<=READS=)(.*?)(?=$)',s).group(0).split(\",\")\n if d[\"SVTYPE\"] == \"INS\":\n d[\"SEQS\"] = re.search(r'(?<=SEQS=)(.*?)(?=;)',s).group(0).split(\",\")\n return d", "def test_parse():\n first = parse_formula(\"PO4H2(CH2)12CH3\")\n assert first == {\"P\":1, \"O\":4, \"H\":29, \"C\":13}\n\n second = parse_formula(\"H2O\")\n assert second == {\"H\":2, \"O\":1}", "def parse_streettype(self):\n \n\n try:\n word = sttype[self.words[self.index]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n itag = word['tag'].index(Vocabulary.STREET_TYPE)\n lemma = word['lemma'][itag]\n return lemma, 1\n return None, 0\n except: return None, 0", "def parse(self, string):\n parse = re.match(\"^((?:[0-9]{1,3}\\.){3}[0-9]{1,3})\\s\\(((?:\\d)*\\.(?:\\d)*|(?:\\d)*)\\sms\\)$\", string)\n parse_result = parse.groups()\n return parse_result[0], parse_result[1]", "def _parse_boardline(self, start, street):\n # Exceptions caught in _parse_street.\n board_line = self._splitted[start]\n\n match = self._STREET_RE.search(board_line)\n cards = match.group(1)\n cards = tuple(cards.split()) if street == 'flop' else cards\n setattr(self, street, cards)\n\n pot = match.group(2)\n setattr(self, \"%s_pot\" % street, Decimal(pot))\n\n num_players = int(match.group(3))\n setattr(self, \"%s_num_players\" % street, num_players)", "def parse_drs_identifier(drs_candidate: str) -> Tuple[str, str, str]:\n # determine if hostname or compact identifier or unknown\n drs_regex = r\"drs://([A-Za-z0-9\\.\\-\\~]+)/([A-Za-z0-9\\.\\-\\_\\~\\/]+)\"\n # either a drs prefix:\n\n matches = re.findall(drs_regex, drs_candidate, re.UNICODE)\n\n if len(matches) == 1: # this could be a hostname DRS id\n hostname_regex = (\n r\"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*\"\n 
r\"([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])$\"\n )\n hostname_matches = re.findall(hostname_regex, matches[0][0], re.UNICODE)\n if len(hostname_matches) == 1:\n return matches[0][0], matches[0][1], \"hostname\"\n # possible compact rep\n compact_regex = r\"([A-Za-z0-9\\.\\-\\~]+)/([A-Za-z0-9\\.\\-\\_\\~\\/]+)\"\n matches = re.findall(compact_regex, drs_candidate, re.UNICODE)\n if len(matches) == 1 and len(matches[0]) == 2:\n return matches[0][0], matches[0][1], \"compact\"\n\n # can't figure out a this identifier\n return \"\", \"\", \"unknown\"", "def split_addr(self, a):\n a = a.replace('http://', '')\n a = a.replace('https://', '')\n\n addr = tlde.extract(a)\n is_ip = tlde.tldextract.looks_like_ip(addr.domain)\n if is_ip:\n ip = addr.domain\n path_and_params = a[a.index(ip)+len(ip):].split('?')\n path = path_and_params[0]\n if len(path_and_params) > 1:\n params = path_and_params[1:]\n else:\n params = ''\n return {'ip': ip, 't3': None, 't2': None, 'path': path, 'params': params, 'url/ip': 'ip'}\n else:\n t3 = addr.subdomain\n t2 = addr.registered_domain\n path_and_params = a[a.index(addr.fqdn)+len(addr.fqdn):].split('?')\n path = path_and_params[0]\n if len(path_and_params) > 1:\n params = path_and_params[1:]\n else:\n params = ''\n return {'t3': t3, 't2': t2, 'ip': None, 'path': path, 'params': params, 'url/ip': 'url'}", "def get_address_parts(ip, mask):\n\n ip_bin_str = address_to_bin(ip).replace('.', '')\n mask_bin_str = address_to_bin(mask).replace('.', '')\n\n net_size = mask_bin_str.rfind('1') + 1\n host_size = ADDR_LEN - net_size\n\n net = _address_from_bin_list(_split_bin_str_on_ocsets(ip_bin_str[:net_size] + '0' * host_size))\n host = _address_from_bin_list(_split_bin_str_on_ocsets('0' * net_size + ip_bin_str[-host_size:]))\n net_count = 2 ** host_size - 2\n count_string = '2^{0}-2'.format(host_size)\n return net, host, net_count, count_string", "def input_parser(input_string: str) -> str: \n if is_int(input_string):\n return input_string\n #he is int, give back plz.\n else:\n try:\n modified_input: str = input_string.strip()\n\n evaluatable_pairs: str = regex_splitter(modified_input)\n\n while not (is_int(evaluatable_pairs)):\n evaluatable_pairs = regex_splitter(evaluatable_pairs)\n\n return (evaluatable_pairs)\n\n except:\n raise Exception(\"Invalid Input\")", "def street_address1(self) -> str:\n return pulumi.get(self, \"street_address1\")", "def get_city(string):\n city = \"\"\n previous_ch = None;\n\n #For each character in string\n for ch in string:\n #break if it is a comma, the city has been completed\n if ch == \",\":\n break\n #if the character is a letter, add it to the \"city\" string\n elif ch.isalpha():\n city += ch\n #if the character is a space, and the previous character is a letter, add the space to the \"city\" string. 
(This prevents duplicate spaces)\n elif ch.isspace() & previous_ch.isalpha():\n city += ch\n\n #update previous character\n previous_ch = ch\n\n return city", "def format_address(line1, line2, city, state, zipcode):\n\t\n\tstreetlines = line1\n\tcityline = city\n\t\n\tif len(streetlines) > 0 and len(line2) > 0:\n\t\tstreetlines += \"\\n\"\n\t\n\tif len(cityline) > 0 and len(state) > 0:\n\t\tcityline += \", \"\n\t\n\tstreetlines += line2\n\tcityline += state\n\t\n\treturn \"\\n\".join([streetlines, cityline, zipcode])", "def seperate_Loc_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n if state in us_state_abbrev:\n res.append(us_state_abbrev[state])\n return res, list(values)", "def get_first_and_last_name(full_name):\n try:\n result = full_name.split(' ', 1)\n except AttributeError:\n return ['', '']\n else:\n if len(result) == 2:\n return result\n return [full_name, full_name]", "def parse_input(giant_string):\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\n\n X_train_row_strings = X_train_part.split(\"S\")\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\n X_train = np.array(X_train_rows)\n\n Y_train = concatenated_string_to_array(Y_train_part)\n\n X_test_row_strings = X_test_part.split(\"S\")\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\n X_test = np.array(X_test_rows)\n\n return X_train, Y_train, X_test", "def Parser(wa1, wa2):\r\n #Note that in the documentation, they start counting at position 1\r\n output = { \r\n 'First Borough Name': wa1[360 :369].strip(),\r\n 'House Number Display Format': wa1[369: 385].strip(),\r\n 'House Number Sort Format': wa1[385: 396].strip(),\r\n 'B10SC First Borough and Street Code': wa1[396: 407].strip(),\r\n 'Second Street Name Normalized': wa1[407:439].strip(),\r\n 'Community District': wa2[149:152].strip(),\r\n 'Zip Code': wa2[152:157].strip(),\r\n 'Election District': wa2[157:160].strip(),\r\n 'Assembly District': wa2[160:162].strip(),\r\n 'Congressional District': wa2[163:165].strip(),\r\n 'State Senatorial District': wa2[165:167].strip(),\r\n 'City Council District': wa2[169:171].strip(),\r\n 'Police Precinct': wa2[191:194].strip(),\r\n 'Community School District': wa2[203:205].strip(),\r\n 'Atomic Polygon': wa2[205: 208].strip(),\r\n '2010 Census Tract': wa2[223: 229].strip(),\r\n '2010 Census Block': wa2[229:233].strip(),\r\n '2010 Census Block Suffix': wa2[233].strip(),\r\n 'Neighborhood Tabulation Area (NTA)': wa2[245:249].strip(),\r\n 'DSNY Snow Priority Code': wa2[249].strip(),\r\n 'Hurricane Evacuation Zone (HEZ)': wa2[260:262].strip(),\r\n 'Spatial Coordinates of Segment': {'X Coordinate, Low Address End': wa2[313:320].strip(),\r\n 'Y Coordinate, Low Address End': wa2[320:327].strip(),\r\n 'Z Coordinate, Low Address End': wa2[327:334].strip(),\r\n 'X Coordinate, High Address End': wa2[334:341].strip(),\r\n 'Y Coordinate, High Address End': wa2[341:348].strip(),\r\n 'Z Coordinate, High Address End': wa2[348:355].strip(),\r\n },\r\n 'Roadway Type': wa2[444:446].strip(),\r\n 'Bike Lane': wa2[486].strip(),\r\n 'NTA Name': wa2[553: 628].strip(),\r\n 'USPS Preferred City Name': wa2[628:653].strip(),\r\n 'Latitude': wa2[653:662].strip(),\r\n 'Longitude': wa2[662: 673].strip(),\r\n 'Borough Block Lot (BBL)': {'Borough code': wa2[1533].strip(),\r\n 'Tax Block': 
wa2[1534:1539].strip(),\r\n 'Tax Lot': wa2[1539:1543].strip(),\r\n },\r\n 'Building Identification Number (BIN) of Input Address or NAP': wa2[1581:1588].strip(),\r\n 'X-Y Coordinates of Lot Centroid': wa2[1699:1713].strip(),\r\n 'Spatial X': wa2[125:132].strip(),\r\n 'Spatial Y': wa2[132:139].strip(),\r\n 'Message': wa1[579:659].strip(),\r\n }\r\n return output", "def return_list_from_string(inputx):\n x = inputx.split(\";\")\n for wrd in x:\n if \"WORD1\" in wrd or \"NAME1\" in wrd:\n wrd2 = (\n wrd.replace(\"WORD1:\", \"\")\n .replace(\"{{WORD1}}:\", \"\")\n .replace(\"NAME1:\", \"\")\n .replace(\"{{NAME1}}:\", \"\")\n )\n wrd3 = wrd2.strip()\n wrds = wrd3.replace(\"[\", \"\").replace(\"]\", \"\")\n wrds1 = wrds.split(\",\")\n wrds1 = [w.strip() for w in wrds1]\n if \"WORD2\" in wrd or \"NAME2\" in wrd:\n wrd2 = (\n wrd.replace(\"WORD2:\", \"\")\n .replace(\"{{WORD2}}:\", \"\")\n .replace(\"NAME2:\", \"\")\n .replace(\"{{NAME2}}:\", \"\")\n )\n wrd3 = wrd2.strip()\n wrds = wrd3.replace(\"[\", \"\").replace(\"]\", \"\")\n wrds2 = wrds.split(\",\")\n wrds2 = [w.strip() for w in wrds2]\n else:\n wrds2 = \"\"\n return wrds1, wrds2", "def _parse_location(location_string):\n location_regex = r\"(\\d+)-(\\d+)(\\(+\\)|\\(-\\)|)\"\n match = re.match(location_regex, location_string.strip())\n start, end, strand = match.groups()\n return int(start), int(end), -1 if strand == \"(-)\" else 1", "def get_apartment_address(self, soup, apartment_dict):\n\n info_class = soup.find_all('div', {'class': 'info'})\n if info_class and len(info_class) > 0:\n info_class = info_class[0]\n address = info_class.find('h2').text.strip()\n\n from parse import parse\n address = parse(\"Location: {}\", address)[0]\n apartment_dict['address'] = address\n else:\n logging.warning(\"Failed to parse apartment address\")\n return", "def ExtractNumbers(s):\n\n t = s.strip('[]\\n')\n comma_space = r', '\n re_comma_space = re.compile(comma_space)\n z = re_comma_space.split(t)\n #print z\n return z", "def parse(self, item: str) -> Tuple[str, str]:\n try:\n city, *_, country = item.split(',')\n except ValueError:\n return '', item\n return city.strip(), country.strip()", "def parse_input(giant_string):\r\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\r\n\r\n X_train_row_strings = X_train_part.split(\"S\")\r\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\r\n X_train = np.array(X_train_rows)\r\n\r\n Y_train = concatenated_string_to_array(Y_train_part)\r\n\r\n X_test_row_strings = X_test_part.split(\"S\")\r\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\r\n X_test = np.array(X_test_rows)\r\n\r\n return X_train, Y_train, X_test", "def city_parser(city: str = None):\n return city.text.strip().split(',')[1]" ]
[ "0.70592016", "0.66491073", "0.61709803", "0.6159874", "0.614569", "0.6053972", "0.6050844", "0.6037463", "0.5887759", "0.58091587", "0.5799028", "0.57310367", "0.56918335", "0.56567967", "0.5650815", "0.5538323", "0.5499395", "0.54938877", "0.5493846", "0.54662937", "0.54369164", "0.54308015", "0.542838", "0.5418099", "0.54002714", "0.5393423", "0.5390481", "0.5348042", "0.531965", "0.5287658", "0.52861094", "0.52745956", "0.52560294", "0.52428436", "0.52399087", "0.5206124", "0.51955557", "0.5187575", "0.51825696", "0.5165678", "0.5143241", "0.511956", "0.5112951", "0.51083136", "0.5108105", "0.5080235", "0.5076768", "0.50642395", "0.5055932", "0.5018596", "0.5015713", "0.50139517", "0.500463", "0.5000048", "0.4993118", "0.49846083", "0.49795932", "0.49770942", "0.49770942", "0.49735677", "0.496703", "0.49651313", "0.49561158", "0.49473786", "0.49287015", "0.49200198", "0.49168384", "0.4908089", "0.4907499", "0.49034625", "0.48872048", "0.48841047", "0.48804402", "0.4873048", "0.4872637", "0.48660403", "0.48651472", "0.48565593", "0.48414782", "0.4835003", "0.48338005", "0.48298162", "0.48274857", "0.4827307", "0.4822161", "0.4820303", "0.48161325", "0.47975326", "0.47942722", "0.4782617", "0.47810772", "0.47752944", "0.47751775", "0.477391", "0.4767785", "0.47657555", "0.475858", "0.4751523", "0.47486657", "0.47446442" ]
0.55393785
15
Helper function to construct multidimensional dictionaries, e.g. myhash = _makehash(); myhash[1][2] = 4; myhash[2][5][8] = 17
def _makehash():
    return defaultdict(_makehash)
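A minimal usage sketch of this helper, assuming defaultdict is imported from collections (the stored snippet above omits the import); the assignments mirror the example given in the query.

from collections import defaultdict

def _makehash():
    # Auto-vivifying dictionary: each missing key produces another
    # _makehash() defaultdict, so nested assignments need no prior setup.
    return defaultdict(_makehash)

myhash = _makehash()
myhash[1][2] = 4          # myhash[1] is created implicitly
myhash[2][5][8] = 17      # myhash[2] and myhash[2][5] are created implicitly
assert myhash[1][2] == 4
assert myhash[2][5][8] == 17

The recursion in the default factory is what lets indexing descend to arbitrary depth without a KeyError on intermediate levels.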
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hashMap(self,arr):\r\n n = len(arr)\r\n dict1 = {}\r\n i = 1\r\n for i in range(n): \r\n if(i > 0): \r\n key=arr[i]\r\n value=arr[0]\r\n dict1[key] = value\r\n return dict1", "def __init__(self):\n self.hashmap = [[[],[]] for _ in range(self.N)]", "def boardtohashmap(board_2d: List[List[str]]) -> Dict[Tuple[int, int], Gridspace]:\n\n nrows, ncols = len(board_2d), len(board_2d[0])\n return {\n (r, c): Gridspace(r, c, board_2d[r][c], nrows, len(board_2d[r]))\n for r in range(nrows) for c in range(len(board_2d[r]))\n }", "def generate_dict(length):\r\n primeDict = {}\r\n index = 2\r\n \r\n while (index < length):\r\n primeDict[index]=True\r\n index = index+1\r\n \r\n return primeDict", "def _build_hash_table(arr: [str]):\n ht = {}\n for cur_str in arr:\n\n anagram = cur_str[::-1]\n if cur_str in ht.keys():\n # This string is an anagram of some previous\n # Increase anagram count for hash table item\n (original, orig_cnt, anag_cnt) = ht[cur_str]\n ht[cur_str] = (original, orig_cnt, anag_cnt + 1)\n elif anagram in ht.keys():\n # This string equals to some prevoius\n # Increase original count for hash table item\n (original, orig_cnt, anag_cnt) = ht[anagram]\n ht[anagram] = (original, orig_cnt+1, anag_cnt)\n else:\n # This string is new\n ht[anagram] = (cur_str, 1, 0)\n return ht", "def _make_hashable(items):\n\n def convert(x):\n # Perform any conversions here to make a variable hashable\n if isinstance(x, np.ndarray):\n # Create an sha1 of the data, and throw in a string\n # and the shape.\n return ('__type_np.ndarray', x.shape,\n xxhash.xxh3_128_hexdigest(x))\n elif isinstance(x, (list, tuple)):\n return _make_hashable(x)\n elif isinstance(x, dict):\n return _make_hashable(sorted(x.items()))\n return x\n\n return tuple(map(convert, items))", "def createdict(Matrix,List):\r\n n = len(List)\r\n #to get all possible combinations\r\n input_combns = list(itertools.combinations(range(0,n),2))\r\n d = defaultdict(dict)\r\n for x in input_combns:\r\n i,j = x\r\n p,q = List[i],List[j]\r\n d[p][q] = Matrix[i][j]\r\n return d", "def generate_grid_dict(height, width):\n board = {}\n for i in range(height):\n for j in range(width):\n position = (i, j)\n board[position] = 0\n return board", "def _hash(self) -> None:\r\n # for a unit cube there are 8 possible hashes\r\n # returns the tuple of with all 8 hashes\r\n\r\n self.hashes[\"aaa\"] = P[P[P[self.xi] + self.yi] + self.zi]\r\n self.hashes[\"aab\"] = P[P[P[self.xi] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"aba\"] = P[P[P[self.xi] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"abb\"] = P[P[P[self.xi] + self._inc(self.yi)] + self._inc(self.zi)]\r\n self.hashes[\"baa\"] = P[P[P[self._inc(self.xi)] + self.yi] + self.zi]\r\n self.hashes[\"bab\"] = P[P[P[self._inc(self.xi)] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"bba\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"bbb\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self._inc(self.zi)]", "def initialize_d(d, square_sides, offset=0):\n return {key:[] for key in range(offset, square_sides ** 2 + offset)}", "def fresh_hash(self):\n _h = defaultdict(lambda: 0)\n very_small = 0.000000000001\n for g in self.groups: _h[g] = { \"total\": very_small, \"var_all\": 0 }\n return _h", "def make_dict(unused_s, unused_l, toks):\n result = {}\n key_value_pairs = chunks(toks, 2)\n for key_value_pair in key_value_pairs:\n result[key_value_pair[0]] = key_value_pair[1]\n return result", "def create_dictionary():\n d = {}\n for y in range(HEIGHT):\n if (y % 2) 
!= 0:\n pos = (10*y)+10\n else:\n pos =((10*y)-9)+10 \n for x in range(WIDTH):\n xy_tuple = (x,y)\n d[pos] = xy_tuple\n if (y % 2) != 0:\n pos = pos - 1\n else:\n pos = pos + 1\n \n return d", "def create_dict(*args):\n output = {}\n idx = 0\n while idx < len(args):\n output[args[idx + 1]] = args[idx]\n idx += 2\n\n return output", "def __init__(self,n):\n\t\tself._dictOut={}\n\t\tself._dictIn = {}\n\t\tfor i in range(n):\n\t\t\tself._dictOut[i]=[]\n\t\t\tself._dictIn[i] = []", "def build_anagram_dict(word_gen, starting_dict={}, hash_fn=lambda x: tuple(sorted(x))):\n\n dict = starting_dict\n\n for word in word_gen:\n key = hash_fn(word)\n # Using dictionary as hashtable to eliminate duplicates (when reading from literature etc)\n word_list = dict.get(key, {})\n word_list[word] = None\n dict[key] = word_list\n\n return dict", "def createMap(*values):\n\tresult = dict()\n\tfor i in range(0, len(values), 2):\n\t\tresult[values[i]] = values[i+1]\n\treturn result", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def get_hash_map(init_addr):\n addr = init_addr\n hash_map = []\n for i in range(0, len(WIN_HASH), 2):\n pair = WIN_HASH[i:i+2]\n hash_map.append((addr, pair[1]))\n hash_map.append((addr+1, pair[0]))\n addr += 8\n\n return hash_map", "def DictFunction2():\r\n print \"Create Second Dictionary\"\r\n NumberDict = dict(zip((i for i in range(16)), (hex(i) for i in range(16))))\r\n print NumberDict", "def __init__(self, n):\n self._dictOut = {}\n self._dictIn = {}\n for i in range(n):\n self._dictOut[i] = []\n self._dictIn[i] = []", "def Dictionary_create(nMarkers, markerSize):\n pass", "def make_hash(o):\n\n if isinstance(o, (set, tuple, list)):\n\n return hash( tuple([make_hash(e) for e in o]) )\n\n elif not isinstance(o, dict):\n\n return hash(o)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def create_pristine_board(size=100):\n board = defaultdict(dict)\n\n for i in xrange(1, size + 1):\n board[i] = {j: (j - i) for j in xrange(min(i + 1, size + 1), min(i + 7, size + 1))}\n\n return board", "def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet", "def __create_level_entries_dict__(self,\n tree_level_labels,\n tree_level_values,\n ):\n # | - create_level_entries_dict\n level_entries_dict = {}\n for index, variable in enumerate(tree_level_labels):\n level_entries_dict[variable] = tree_level_values[index]\n\n return(level_entries_dict)\n # __|", "def test_hash_numpy():\r\n rnd = np.random.RandomState(0)\r\n arr1 = rnd.random_sample((10, 10))\r\n arr2 = arr1.copy()\r\n arr3 = arr2.copy()\r\n arr3[0] += 1\r\n obj_list = (arr1, arr2, arr3)\r\n for obj1 in obj_list:\r\n for obj2 in obj_list:\r\n yield nose.tools.assert_equal, hash(obj1) == hash(obj2), \\\r\n np.all(obj1 == obj2)\r\n\r\n d1 = {1: arr1, 2: arr1}\r\n d2 = {1: arr2, 2: arr2}\r\n yield nose.tools.assert_equal, 
hash(d1), hash(d2)\r\n\r\n d3 = {1: arr2, 2: arr3}\r\n yield nose.tools.assert_not_equal, hash(d1), hash(d3)\r\n\r\n yield nose.tools.assert_not_equal, hash(arr1), hash(arr1.T)", "def _build_board(y_size, x_size, game_board):\n\n for y_coordinate in range(1, y_size + 1):\n for x_coordinate in range(1, x_size + 1):\n game_board[(x_coordinate, y_coordinate)] = {0: {}, 1: {}, 2: {}}", "def _make_limb_dict():\n\n return {'left_arm_y': 10, 'right_arm_y': 13,\n 'left_arm_z': 11, 'right_arm_z': 14,\n 'left_leg_y': 4, 'right_leg_y': 7,\n 'left_leg_z': 5, 'right_leg_z': 8,\n 'hip_y': 2, 'hip_x': 1}", "def make_dict(keys, values):\n\n return dict(zip(keys, values))", "def hash_double(d):\n # initialize table\n table = [\"-\"] * 19\n # consider each integer k in the input\n for k in d:\n # if k is already in the table this is a duplicate so move to next integer in the input\n # note this check for a duplicate is using the functionality of python rather than checking using a linear probe\n if k in table:\n continue\n # apply the hash function\n i = (6 * k + 3) % 19\n t = i\n # initialize count that checks whether linear probe has considered each bucket and is now full\n count = 0\n j = 0\n # while bucket is already filled\n s = 11 - (k % 11)\n while table[i] != '-':\n j += 1\n # move to next bucket\n i = (t + j*s) % 19\n # increment count\n count += 1\n\n # if table is full\n if count >= 18:\n # can return table as nothing further can be added\n break\n\n # Ensure table[i] is empty so k can be added here\n if table[i] == '-':\n table[i] = k\n\n # now each part of the input has been considered return the table\n return table", "def creating_dict(i, states):\n # base case\n if i == 5:\n # no more edges - recursion ends here\n return {'barcode': []}\n\n # iterative case\n else:\n # this is a tree structure where the node contains timepoint information and barcode information\n # and three edges link to other nodes that represent lineages in three differnet states\n updated_dict = {'t{}'.format(i): {state: creating_dict(i + 1, states) for state in states}}\n updated_dict['t{}'.format(i)].update({'barcode': []})\n return updated_dict", "def create_dictionaries(chars):\n return dict((c, i) for i, c in enumerate(chars)), dict((i, c) for i, c in enumerate(chars))", "def HashValue(self) -> _n_0_t_3[_n_0_t_9]:", "def makedict(real, n):\n m = n/2\n d = nltk.defaultdict(list)\n for word in real:\n for num, syll in enumerate(word):\n if num != 0:\n ls = str(len(syll))\n nsylls_lsylls = [str(len(word)), str(num), ls]\n d[\"_\".join(nsylls_lsylls + [syll[:m], word[num - 1][-m:]])] += [syll]\n return(d)", "def __init__(self,n):\n\t\tself._dict={}\n\t\tfor i in range(n):\n\t\t\tself._dict[i]=[]", "def deep_hash(obj):\n pass", "def _create_dictionaries(self, chars):\n dictionary = dict()\n for char in chars:\n dictionary[char] = len(dictionary)\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return dictionary, reverse_dictionary", "def setup2():\n ht2 = HashTable()\n ht2.set('Apple', 'Dash')\n ht2.set('Flutter', 'Sparkle')\n ht2.set('Pinky', 'Pony')\n return ht2", "def initialize_dicts(m, square_sides):\n rows_missing = defaultdict(list)\n rows_missing = initialize_d(rows_missing, square_sides)\n cols_missing = defaultdict(list)\n cols_missing = initialize_d(cols_missing, square_sides)\n squares_missing = defaultdict(list)\n squares_missing = initialize_d(cols_missing, square_sides, 1)\n return rows_missing, cols_missing, squares_missing", "def build_unq_dict_lst(self, lst1, lst2, key1 = 
\"start_index\", key2 = \"random_seed\"):\n dict_lst = []\n for i in range(len(lst1)):\n for j in range(len(lst2)):\n dictt = {}\n dictt[key1] = lst1[i]\n dictt[key2] = lst2[j]\n dict_lst.append(dictt)\n return dict_lst", "def ht_ant_two():\n ht = HashTable(10)\n ht.set('fond', 'averse')\n ht.set('diligent', 'idle')\n ht.set('guide', 'jam')\n return ht", "def __init__(self):\n self.hash = [[] for _ in range(20011)]", "def hashdict(self):\n return {\n 'pix': super(rmap, self).hashdict(),\n 'map': hashlib.sha1(self.map.view(np.uint8)).hexdigest()\n }", "def test_deep_set_create(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.set_dict_key_value(mdict, \"K:L:M\", \"Q\")\n self.assertEqual(\n {\n \"A\": \"B\",\n \"C\": {\"D\": \"E\", \"F\": {\"G\": \"H\", \"I\": \"J\"}},\n \"K\": {\"L\": {\"M\": \"Q\"}},\n },\n res,\n )", "def create_dictionary(self,*key_value_pairs,**items):\r\n if len(key_value_pairs)%2 !=0:\r\n raise ValueError(\"create dictionary failed. there should be\"\r\n \"an even number of key-value-pairs\")\r\n return self.set_to_dictionary({},*key_value_pairs,**items)", "def build_dict(arg):\n # helper function to the Evaluator.to_property_di_graph() method that\n # packages the dictionaries returned by the \"associate_\" family of\n # functions and then supplies the master dict (one_dict) to the Vertex\n # obj as **kwargs\n one_dict = {}\n for ar in arg:\n one_dict.update(ar)\n return one_dict", "def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def multidict(ordered_pairs):\n # read all values into lists\n d = defaultdict(list)\n for k, v in ordered_pairs:\n d[k].append(v)\n # unpack lists that have only 1 item\n dict_copy = deepcopy(d)\n for k, v in iteritems(dict_copy):\n if len(v) == 1:\n d[k] = v[0]\n return dict(d)", "def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))", "def nested_dict():\n return defaultdict(nested_dict)", "def build_dict(infile):\n\n coords = {}\n sizes = {}\n\n for line in infile:\n fields = line.split()\n ref_st, ref_end, qry_st, qry_end = map(int, fields[0:4])\n qry_chr, qry_size = fields[14], int(fields[8])\n if qry_chr not in coords:\n coords[qry_chr] = {0:[], 1:[]} # 0=ref; 1=qry\n sizes[qry_chr] = qry_size\n coords[qry_chr][0].append([ref_st, ref_end])\n coords[qry_chr][1].append(sorted([qry_st, qry_end]))\n \n return coords, sizes", "def new(num_buckets=256):#用空列表初始化字典\n\taMap=[]\n\tfor i in range(num_buckets):\n\t\taMap.append([])\n\treturn aMap", "def make_employee_dict(names, ID_numbers, salaries, email_addresses):\r\n d = dict()\r\n for i in range(len(names)):\r\n d[ID_numbers[i]] = Employee(names[i], ID_numbers[i], salaries[i], email_addresses[i])\r\n return d", "def hash_values_demo(r):\n record = {\n \"name\": \"Hackers and Slackers\",\n \"description\": \"Mediocre tutorials\",\n \"website\": \"https://hackersandslackers.com/\",\n \"github\": \"https://github.com/hackersandslackers\"\n }\n r.hmset('business', record)\n logger.info(f\"business: {r.hgetall('business')}\")", "def buildDict(self, dict):\n for item in dict:\n length = len(item)\n if length not in self.dic:\n self.dic[length] = [item]\n else:\n self.dic[length].append(item)", "def 
database(words):\n\n d={}\n if len(words) < 3:\n return\n \n for i,word in enumerate(words):\n try:\n first,second,third = (words[i], words[i+1], words[i+2])\n except IndexError:\n break\n key = (first,second)\n if key not in d:\n d[key] = []\n d[key].append(third)\n \n return d", "def cons(map) -> HashMap:\r\n table = HashMap()\r\n from_dict(table, to_dict(map))\r\n return table", "def hash_quadratic(d):\n # initialize table\n table = [\"-\"] * 19\n # consider each integer k in the input\n for k in d:\n # if k is already in the table this is a duplicate so move to next integer in the input\n # note this check for a duplicate is using the functionality of python rather than checking using a linear probe\n if k in table:\n continue\n # apply the hash function\n i = (6 * k + 3) % 19\n t = i\n # initialize count that checks whether linear probe has considered each bucket and is now full\n count = 0\n j = 0\n # while bucket is already filled\n\n while table[i] != '-':\n j += 1\n # move to next bucket\n i = (t + j ** 2) % 19\n # increment count\n count += 1\n\n # if table is full\n if count >= 18:\n # can return table as nothing further can be added\n break\n\n # Ensure table[i] is empty so k can be added here\n if table[i] == '-':\n table[i] = k\n\n # now each part of the input has been considered return the table\n return table", "def make_hashable(value):\n if isinstance(value, dict):\n return tuple([\n (key, make_hashable(nested_value))\n for key, nested_value in sorted(value.items())\n ])\n # Try hash to avoid converting a hashable iterable (e.g. string, frozenset)\n # to a tuple.\n try:\n hash(value)\n except TypeError:\n if is_iterable(value):\n return tuple(map(make_hashable, value))\n # Non-hashable, non-iterable.\n raise\n return value", "def _reference(self):\r\n return {1:2, \"key1\":\"value1\", \"key2\":(1,2,3)}", "def h_python(key, N):\n return hash(key) % N", "def create_triad_counts():\n triads = [str(i) + str(j) + str(k) for i in range(2) for j in range(2) for k in range(2)]\n triad_counts = {}\n\n for triad in triads:\n triad_counts[triad] = [0, 0]\n\n return triad_counts", "def buildDict(self, words):\n for word in words:\n length = len(word)\n key = \"{}/{}\".format(length, word[0])\n ls = self.origin.get(key, [])\n ls.append(word)\n self.origin[key] = ls", "def __init__(self, max_len, max_num):\n self.a = {}\n self.a[0] = [[]]\n self.a[1] = [[1]]\n\n self.max_len = max_len\n self.max_num = max_num", "def gen_dict(keys, vals):\n retVal = {}\n for i in range(len(keys)):\n if i > len(vals):\n retVal[keys[i]] = \"\"\n continue\n retVal[keys[i]] = vals[i]\n return retVal", "def hash_from_dict(d):\r\n items = d.items()\r\n items.sort()\r\n first_part = [k for k, v in items]\r\n second_part = []\r\n for k, v in items:\r\n if isinstance(v, (tuple, list)):\r\n second_part += [tuple(v)]\r\n else:\r\n second_part += [v]\r\n tuple_items = tuple(first_part + second_part)\r\n return hash(tuple_items)", "def init_hash_uuid_lut(session, hashes):\n # Note: unhexlify is necessary since the database stores\n # binary representations of the hashes\n bin_hashes = [binascii.unhexlify(ahash.encode('utf-8'))\n for ahash in hashes]\n # print(\"==> Query hashes: {}\".format(bin_hashes))\n links = session.query(LinkageEntity).filter(\n LinkageEntity.linkage_hash.in_(bin_hashes)).all()\n\n # lut = defaultdict(lambda: [])\n lut = {}\n\n for ahash in hashes:\n # instantiate every bucket even if the hash has no record in the db\n lut[ahash] = []\n\n for link in links:\n # collect every link in the 
corresponding bucket\n lut[link.friendly_hash()].append(link)\n\n return lut", "def new(num_buckets=256):\n\taMap = [] #creating empty list aMap\n\tfor i in range(0, num_buckets):\n\t\taMap.append([]) #append num_buckets into aMap\n\treturn aMap", "def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def resize(self):\n\n # Stores the new size of the hash map\n new_size = len(self.table) * 2 - 1\n\n # Creates new hash map table (empty)\n new_table = [[]] * new_size\n\n # Creates new key hash table (empty)\n new_ref = [[]] * new_size\n\n # For key value pair in the current hash map\n for key, value in self:\n\n # Get new hashed key for new hash map\n i = hash(key) % len(new_table)\n\n # Put key in new key hash table for index reference\n new_ref[i].append(key)\n\n # Put value in hash map\n new_table[i].append(value)\n\n # Overwrite old hash map with new hash map\n self.table = new_table.copy()\n\n # Overwrite old key hash table with new key hash table\n self.keys_ref = new_ref.copy()", "def createDictionary(dataset):\r\n for columnNumber in range(2, dataset.shape[1]):\r\n print(\"manipulating \", dataset.at[0, columnNumber])\r\n manipulateData(columnNumber, dataset)\r\n return Dictionary", "def new(num_buckets=256):\n aMap=[]", "def buildDict(self, words: List[str]) -> None:\n for word in words:\n self.buckets[len(word)].append(word)", "def __hash__(self):\n return hash(('genes', tuple(self.genes), self.environment))", "def put(hash, key: int, value: V) -> HashMap:\r\n if hash == None:\r\n hash = HashMap()\r\n hash_key = key % hash.size\r\n if hash.data[hash_key].key == None:\r\n hash.data[hash_key].value = value\r\n hash.data[hash_key].key = key\r\n hash.keyset.append(key)\r\n else:\r\n temp = Node(key, value)\r\n hash.keyset.append(key)\r\n p = hash.data[hash_key]\r\n while p.next != None:\r\n p = p.next\r\n p.next = temp\r\n\r\n return hash", "def setup1():\n ht1 = HashTable()\n ht1.set('Apple', 'Jack')\n ht1.set('Pinky', 'Pie')\n ht1.set('Flutter', 'Shy')\n return ht1", "def nested_set(data, keys, value):\n for key in keys[:-1]:\n data = data.setdefault(key, {})\n data[keys[-1]] = value", "def mkPerfHash(keys, Hash):\n f1, f2, G = generate_hash(keys, Hash)\n return lambda k: (G[f1(k)] + G[f2(k)]) % len(G)", "def hash_with_depth_int(func: HashFuncT) -> HashFuncT:\n\n @wraps(func)\n def hashing_func(key, depth=1):\n \"\"\"wrapper function\"\"\"\n res = []\n tmp = func(key, 0)\n res.append(tmp)\n for idx in range(1, depth):\n tmp = func(f\"{tmp:x}\", idx)\n res.append(tmp)\n return res\n\n return hashing_func", "def hash_tuples(\n vals: MultiIndex | Iterable[tuple[Hashable, ...]],\n encoding: str = \"utf8\",\n hash_key: str = _default_hash_key,\n) -> npt.NDArray[np.uint64]:\n if not is_list_like(vals):\n raise TypeError(\"must be convertible to a list-of-tuples\")\n\n from pandas import (\n Categorical,\n MultiIndex,\n )\n\n if not isinstance(vals, ABCMultiIndex):\n mi = MultiIndex.from_tuples(vals)\n else:\n mi = vals\n\n # create a list-of-Categoricals\n cat_vals = [\n Categorical._simple_new(\n mi.codes[level],\n CategoricalDtype(categories=mi.levels[level], ordered=False),\n )\n for level in range(mi.nlevels)\n ]\n\n # hash the list-of-ndarrays\n hashes = (\n cat._hash_pandas_object(encoding=encoding, hash_key=hash_key, categorize=False)\n for cat in cat_vals\n )\n h = combine_hash_arrays(hashes, 
len(cat_vals))\n\n return h", "def buildDict(self, words):\n self.dict = collections.defaultdict(set)\n for word in words:\n for i in xrange(len(word)):\n self.dict[word[:i] + '*' + word[i+1:]].add(word[i])", "def new_dict(key, value, n_keys=0):\n # With JIT disabled, ignore all arguments and return a Python dict.\n return dict()", "def construct(HashClass, empty_structure, memoization_key, store_key, play, m, stores, edge_nout_hash):\n pmts(edge_nout_hash, HashClass)\n\n memoization = getattr(m, memoization_key)\n store = getattr(stores, store_key)\n\n structure = empty_structure\n\n todo = []\n for tup in store.all_nhtups_for_nout_hash(edge_nout_hash):\n if tup.nout_hash in memoization:\n structure = memoization[tup.nout_hash]\n break\n todo.append(tup)\n\n for tup in reversed(todo):\n edge_nout = tup.nout\n edge_nout_hash = tup.nout_hash\n\n note = edge_nout.note\n\n structure = play(m, stores, structure, note, YourOwnHash(edge_nout_hash))\n memoization[edge_nout_hash] = structure\n\n return structure", "def nested_dict():\n try:\n num_list = [1, 2, 3, 4]\n new_dict = current = {}\n for name in num_list:\n current[name] = {}\n current = current[name]\n print(new_dict)\n except ValueError as e:\n logger.error(\"Not find the dictnary\"+str(e))", "def __init__(self):\n self.hashset = [[] for _ in range(self.N)]", "def make_dict(cls, *args: Any, **kwargs: Any) -> Dict[str, Any]:\n return _DictMaker(struct_class=cls, positional_args=args, keyword_args=kwargs).make_dict()", "def __init__(self):\n self.size = 10000\n self.hashmap = [None] * self.size", "def __init__(self):\n self.size = 1000\n self.hash_table = [None] * self.size", "def test_get_hash(get_all_structures):\n comp_matrix = np.zeros((len(get_all_structures), len(get_all_structures)))\n for i, structure_a in enumerate(get_all_structures):\n for j, structure_b in enumerate(get_all_structures):\n if i < j:\n hash_a = get_hash(structure_a)\n hash_b = get_hash(structure_b)\n if hash_a == hash_b:\n comp_matrix[i][j] = 1\n else:\n comp_matrix[i][j] = 0\n assert sum(comp_matrix) == sum(np.diag(comp_matrix))", "def grid_vals(grid):\n\tletters = list(grid)\n\t#print \"---------------------------------\\n-------------------\"\n\t#print letters\n\t#print \"----------------------------------\\n-------------------\"\n\tassert len(letters) == 81\n\ttempdict = zip(squares, letters)\n\treturn dict(tempdict)", "def create_label_map():\n\n cnt = 1\n tmp_array = np.array([10, 15, 25, 30, 40, 47, 57, 63, 69, 74, 81])\n dictionary = dict()\n dictionary[1] = 1\n for idx, val in enumerate(tmp_array):\n for j in range(cnt + 1, val):\n dictionary[j] = int(idx + 2)\n cnt = j\n return dictionary", "def generate_pairs(self, _list_d):\n\n length = len(_list_d)\n result_list = {}\n\n for i in range(length):\n for j in xrange(i+1,length):\n l = len(result_list)\n result_list[l] = ((i, _list_d[i]),(j, _list_d[j]))\n\n return result_list", "def triples_as_dict(tuples: Iterable[Tuple[Any, Any, Any]]) -> Dict[Any, List[Tuple[Any, Any]]]:\n d = defaultdict(list)\n for t in tuples:\n d[t[0]].append((t[1], t[2]))\n return d", "def create_dict(info):\n \"\"\"\n dict = {ip: {counter:*}, {weekdays: []}, {hours: []}}\n \"\"\"\n dict_info = dict()\n for i in info:\n ip = i[0]\n hours = i[1]\n weekdays = i[2]\n if ip not in dict_info:\n dict_info[ip] = {}\n dict_info[ip]['counter'] = 0\n dict_info[ip]['hours'] = []\n dict_info[ip]['weekdays'] = []\n dict_info[ip]['counter'] += 1\n dict_info[ip]['hours'].append(hours)\n dict_info[ip]['weekdays'].append(weekdays)\n return 
dict_info", "def tree_hash(hashes):\n while len(hashes) > 1:\n hashes = [hashlib.sha256(\"\".join(h[i:i+1])).digest() for i in range(i,2)]\n return hashes[0]", "def add_hash(self, lst):\n self.__data = []\n num = len(lst) + 1\n self.add_data((\"\", 4, 1))\n self.add_data((\"\", 4, num))\n self.add_data((\"\", 4, num - 1))\n self.add_data((\"\", 4, 0))\n if 1 < num:\n for ii in range(num - 1):\n self.add_data((\"\", 4, ii))", "def makeMap(pairs):\n\n return ConstMap.fromPairs(pairs)", "def sur_mat2dict(mat,ndim):\n kwork = np.vstack((np.zeros((1,1),dtype=np.int_),ndim))\n dicts = {}\n ki = 0\n for r in range(1,len(kwork)):\n ki = ki + kwork[r-1][0] \n ke = ki + kwork[r][0]\n dicts[r-1] = mat[ki:ke,:]\n return(dicts)", "def build_hap_dict(self, obs_tab, leg_tab, hap_tab, number_of_haplotypes):\n\n hap_dict = dict()\n mismatches = 0\n combined = {pos: (ref,alt,hap) for (chr_id,pos,ref,alt),hap in zip(leg_tab, hap_tab)}\n missing = 3*(None,)\n\n b = (1 << number_of_haplotypes) - 1 #### equivalent to int('1'*number_of_haplotypes,2)\n\n for (pos, read_id, base) in obs_tab:\n ref, alt, hap = combined.get(pos, missing)\n if base==alt:\n hap_dict[(pos,base)] = hap\n elif base==ref:\n hap_dict[(pos,base)] = hap ^ b ### ^b flips all bits of the binary number, hap_tab[ind] using bitwise xor operator.\n else:\n mismatches += 1\n\n fraction_of_matches = 1-mismatches/len(obs_tab)\n\n return hap_dict, fraction_of_matches" ]
[ "0.67796296", "0.5938041", "0.5885213", "0.5752976", "0.5734703", "0.5709291", "0.5704826", "0.5652041", "0.5644233", "0.56247675", "0.56185186", "0.5611323", "0.56072676", "0.5539448", "0.5488976", "0.5445115", "0.54388916", "0.54211164", "0.54074925", "0.53994673", "0.537734", "0.5345715", "0.53311265", "0.53041047", "0.5297536", "0.52854407", "0.52676785", "0.5266973", "0.5258118", "0.5250899", "0.52496153", "0.5241375", "0.5231931", "0.5218357", "0.5207242", "0.5207184", "0.51917005", "0.51881474", "0.5186212", "0.5170368", "0.5154522", "0.5148854", "0.514845", "0.5140317", "0.51398796", "0.51284385", "0.51237446", "0.51127976", "0.51089096", "0.51042277", "0.51008743", "0.50988376", "0.5083378", "0.507889", "0.50674933", "0.5054963", "0.5053744", "0.5046487", "0.50411546", "0.50395423", "0.50364566", "0.50306463", "0.5029837", "0.5019204", "0.5018503", "0.50161046", "0.50142056", "0.50092286", "0.5005394", "0.50008935", "0.49992308", "0.4986751", "0.49827823", "0.49817964", "0.4977092", "0.49751556", "0.49742776", "0.4972668", "0.4971876", "0.4970676", "0.49635354", "0.49612945", "0.49601033", "0.4959278", "0.49579743", "0.49539164", "0.4944816", "0.49408767", "0.49396935", "0.49371246", "0.49094182", "0.49085245", "0.48967183", "0.48946962", "0.48938245", "0.48915505", "0.48869178", "0.48836964", "0.4882849", "0.4877624" ]
0.6773854
1
Convert headers of fetched tickers to a common format for convenient data storage in the database. This method assumes that the parser's headers are configured properly (headers_dict); if one of the headers is missing from the config file, an exception is raised
def convert_headers(self, tickers):
    result = _makehash()
    for pair_name, fetched_values_dict in list(tickers.items()):
        for header, value in list(fetched_values_dict.items()):
            result[pair_name][self.config['headers'][header]] = value
    return result
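A hedged sketch of how this method behaves, with its body reproduced inside a hypothetical TickerParser host class; the config mapping and the sample ticker payload are illustration-only assumptions, not part of the original project, and _makehash is the helper from the previous entry. A header name absent from config['headers'] surfaces as a KeyError, matching the "exception raised" behaviour described in the query.

from collections import defaultdict

def _makehash():
    return defaultdict(_makehash)

class TickerParser:
    # Hypothetical minimal host class: only the attributes convert_headers uses.
    def __init__(self, config):
        self.config = config

    def convert_headers(self, tickers):
        result = _makehash()
        for pair_name, fetched_values_dict in list(tickers.items()):
            for header, value in list(fetched_values_dict.items()):
                # A header missing from config['headers'] raises KeyError here.
                result[pair_name][self.config['headers'][header]] = value
        return result

parser = TickerParser({'headers': {'last': 'price', 'vol': 'volume'}})  # assumed mapping
tickers = {'BTC_USD': {'last': '43750.1', 'vol': '812.4'}}              # assumed payload
normalized = parser.convert_headers(tickers)
print(dict(normalized['BTC_USD']))  # {'price': '43750.1', 'volume': '812.4'}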
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous column header to it\n if j > 0 and \"Unc\" in hd:\n hd = headers[j - 1] + \" \" + hd\n if \"Unc\" in hd and \"Unc.\" not in hd:\n hd = hd.replace(\"Unc\", \"Unc.\")\n # expand abbreviated headers\n if \"Energy\" in hd and \"Energy Level\" not in hd:\n hd = hd.replace(\"Energy\", \"Energy Level\")\n if \"Par. Elevel\" in hd:\n hd = hd.replace(\"Par. Elevel\", \"Parent Energy Level\")\n if \"Abund.\" in hd:\n hd = hd.replace(\"Abund.\", \"Abundance (%)\")\n if \"Ene.\" in hd:\n hd = hd.replace(\"Ene.\", \"Energy\")\n if \"Int.\" in hd:\n hd = hd.replace(\"Int.\", \"Intensity (%)\")\n if \"Dec\" in hd and \"Decay\" not in hd:\n hd = hd.replace(\"Dec\", \"Decay\")\n if \"Rad\" in hd and \"Radiation\" not in hd:\n hd = hd.replace(\"Rad\", \"Radiation\")\n if \"EP\" in hd:\n hd = hd.replace(\"EP\", \"Endpoint\")\n if \"Mass Exc\" in hd and \"Mass Excess\" not in hd:\n hd = hd.replace(\"Mass Exc\", \"Mass Excess\")\n headers_new.append(hd)\n if len(set(headers_new)) != len(headers_new):\n raise NNDCRequestError(\n \"Duplicate headers after parsing\\n\"\n + f' Original headers: \"{headers}\"\\n'\n + f' Parsed headers: \"{headers_new}\"'\n )\n return headers_new", "def _unpack_headers(self, headers):\n return dict((k,v[0]) for (k,v) in headers.getAllRawHeaders())", "def fill_headers(self, headers):\n self.headers = {h[0]: h[1] for h in headers}", "def _parse_headers(raw_headers: List[str]) -> Dict[str, str]:\n headers: Dict[str, str] = {}\n for header in raw_headers:\n name = header[: header.find(\":\")].strip()\n value = header[header.find(\":\") + 1 :].strip()\n headers[name.lower()] = value\n\n return headers", "def _parse_headers(headers):\n try:\n return dict(header.split(\":\") for header in headers)\n except:\n raise ValueError(\"Invalid headers %s\" % headers)", "def _headers(self, headers_dict):\n return Headers(dict((k,[v]) for (k,v) in headers_dict.items()))", "def manage_headers(dem_header_file, header_paths):\n dem_header = parse_dem_header(dem_header_file)\n # find param files containing filename dates\n if len(header_paths) == 2:\n headers = [parse_epoch_header(hp) for hp in header_paths]\n combined_header = combine_headers(headers[0], headers[1], dem_header)\n else:\n # probably have DEM or incidence file\n combined_header = dem_header\n combined_header[ifc.DATA_TYPE] = ifc.DEM\n\n return combined_header", "def __parseHeaders(headers):\n global __all_headers\n if headers and len(headers) > 0:\n for header in headers:\n name = header.getElementsByTagName(\"name\")[0].childNodes[0].data\n value = header.getElementsByTagName(\"value\")[0].childNodes[0].data\n __addHeader(name, value)\n #print(__all_headers)", "def _ToTuples(headers):\n all_headers = []\n for line in headers:\n if line[0] in '\\t ':\n if not all_headers:\n logging.warning(\n 'Unexpected response header continuation line [%s]', line)\n continue\n name, value = all_headers.pop()\n value += '\\n ' + line.strip()\n else:\n name_value = RealHttpFetch._GetHeaderNameValue(line)\n if not name_value:\n logging.warning(\n 'Response header in wrong format [%s]', line)\n continue\n name, value = name_value # pylint: disable=unpacking-non-sequence\n all_headers.append((name, value))\n return all_headers", "def updateheader(self, headerlist=[], http_s_obj=None):\n 
header = {}\n for headerparam in headerlist:\n key_value = headerparam.split(\":\", 1)\n if len(key_value) == 2:\n try:\n key = key_value[0]\n value = key_value[1].strip()\n header.update({key: value})\n if http_s_obj:\n if http_s_obj.header.get(key):\n http_s_obj.header.update({key: value})\n except Exception:\n continue\n return header", "def _build_headers(self):\n headers = {}\n headers.update(self.data_sources)\n headers.update(self.seasons)\n headers.update(self.region)\n headers.update(self.subregions)\n return headers", "def test_normalize_headers():\n headers = [\n 'AllocationTransferAgencyIdentifier', 'BeginningPeriodOfAvailability', 'flex_mycol', 'FLEX_ANOTHER'\n ]\n mapping = {'allocationtransferagencyidentifier': 'ata', 'beginningperiodofavailability': 'boa'}\n\n result = csvReader.normalize_headers(headers, False, mapping)\n assert list(result) == [\n 'allocationtransferagencyidentifier', 'beginningperiodofavailability', 'flex_mycol', 'flex_another'\n ]\n result = csvReader.normalize_headers(headers, True, mapping)\n assert list(result) == ['ata', 'boa', 'flex_mycol', 'flex_another']", "def _make_headers_df(headers_response):\n\n headers_df = util.make_dataframe(headers_response)\n headers_df = headers_df[\n [\"text\", \"column_index_begin\", \"column_index_end\", \"row_index_begin\", \"row_index_end\", \"cell_id\",\n \"text_normalized\"]]\n return headers_df", "def _headers(self) -> Mapping[str, str]:\n return {}", "def _normalize_headers(self):\n self.ncookies=dict((k.lower(), v) for k, v in self.request.cookies.iteritems())\n self.nheaders=dict((k.lower(), v) for k, v in self.request.headers.iteritems())", "def scrub_headers(headers):\n if isinstance(headers, dict):\n headers = headers.items()\n headers = [\n (parse_header_string(key), parse_header_string(val))\n for (key, val) in headers\n ]\n if not logger_settings.get('redact_sensitive_headers', True):\n return dict(headers)\n if logger_settings.get('reveal_sensitive_prefix', 16) < 0:\n logger_settings['reveal_sensitive_prefix'] = 16\n return {key: safe_value(key, val) for (key, val) in headers}", "def headers_raw_to_dict(headers_raw):\n\n if headers_raw is None:\n return None\n headers = headers_raw.splitlines()\n headers_tuples = [header.split(':', 1) for header in headers]\n\n result_dict = {}\n for header_item in headers_tuples:\n if not len(header_item) == 2:\n continue\n\n item_key = header_item[0].strip()\n item_value = header_item[1].strip()\n result_dict[item_key] = item_value\n\n return result_dict", "def my_normalize(self, headers):\n ret = normalize(self, headers)\n if 'authorization' in ret:\n ret['Authorization'] = ret.pop('authorization')\n return ret", "def parse_header(self):", "def set_headers(self, headers):\n self.headers = headers\n process_headers(self)\n self.character_encoding = self.parsed_headers.get(\n 'content-type', (None, {})\n )[1].get('charset', 'utf-8') # default isn't UTF-8, but oh well", "def decode_header(header):\n new_header = {}\n\n for item in header:\n split = item.split('\\t')\n new_header[split[0].replace(':', '')] = split[1].replace(\"\\r\\n\", \"\")\n\n return new_header", "def from_headers(self, headers):\n try:\n # First IP address is the one of the client\n ip = headers['X_FORWARDED_FOR'].split(',')[0].strip()\n except KeyError:\n ip = headers.get('REMOTE_ADDR')\n\n if ip:\n # Double-check if the address has a valid format\n if re.match(r'^[\\d+]{1,3}\\.[\\d+]{1,3}\\.[\\d+]{1,3}\\.[\\d+]{1,3}$',\n ip, re.I):\n ip = None\n\n # Exclude private IP address ranges\n if 
re.match(r'^(?:127\\.0\\.0\\.1|10\\.|192\\.168\\.|172\\.(?:1[6-9]|2[0-9]|3[0-1])\\.)', ip):\n ip = None\n\n self.ip_address = ip\n\n self.user_agent = headers.get('HTTP_USER_AGENT')\n\n if 'HTTP_ACCEPT_LANGUAGE' in headers:\n parsed_locales = []\n res = re.findall(\n r'(^|\\s*,\\s*)([a-zA-Z]{1,8}(-[a-zA-Z]{1,8})*)\\s*(;\\s*q\\s*=\\s*(1(\\.0{0,3})?|0(\\.[0-9]{0,3})))?', \n headers['HTTP_ACCEPT_LANGUAGE'], re.I)\n for r in res:\n name = r[1].replace('-', '_')\n value = 1 if not r[4] else r[4]\n parsed_locales += [(name, value)]\n\n self.locale = sorted(parsed_locales, key=lambda x: x[1],\n reverse=True)[0][0]\n\n return self", "def filter_headers(self, header):\n if header == \"Ticker symbol\":\n return \"symbol\"\n elif header == \"GICS Sector\":\n return \"sector\"\n elif header == \"Security\":\n return \"name\"\n elif header == \"GICS Sub Industry\":\n return \"industry\"\n else:\n return header", "def _parse_rate_limit_headers(headers):\n limit = int(headers[\"X-RateLimit-Limit\"])\n remaining = int(headers[\"X-RateLimit-Remaining\"])\n reset_at_utc = int(headers[\"X-RateLimit-Reset\"])\n return {\n \"limit\": limit,\n \"used\": limit - remaining,\n \"remaining\": remaining,\n \"reset_at_utc\": reset_at_utc,\n \"reset_in_sec\": reset_at_utc - round(time.time()),\n \"last_update\": round(time.time())\n }", "def generate_header_dic(self, header_strings):\n headers = dict()\n\n for header_values in header_strings:\n header_list = header_values.split(':')\n headers[header_list[0]] = header_list[1]\n return headers", "def headers(self) -> dict:\n raise NotImplementedError # pragma: no cover", "def _headercorrected(hdr):\n # COM*** -> COMMENT\n i = 1\n while 'COM%03d' % i in hdr:\n value = hdr['COM%03d' % i]\n comment = hdr.cards['COM%03d' % i].comment\n hdr['COMMENT'] = '[%s] %s' % (comment, value)\n del hdr['COM%03d' % i]\n i += 1\n # HIST*** -> HISTORY\n i = 1\n while 'HIST%03d' % i in hdr:\n value = hdr['HIST%03d' % i]\n comment = hdr.cards['HIST%03d' % i].comment\n hdr['HISTORY'] = '%s (%s)' % (value, comment)\n del hdr['HIST%03d' % i]\n i += 1\n # ORIGIN -> FROM\n if 'ORIGIN' in hdr.keys():\n hdr.rename_keyword('ORIGIN', 'FROM')\n if 'ORIGIN_V' in hdr.keys():\n hdr.rename_keyword('ORIGIN_V', 'FROM_V')\n # SOURCE_V -> FORMAT\n if 'SOURCE_V' in hdr.keys():\n hdr.rename_keyword('SOURCE_V', 'FORMAT')\n # SRC_VERS -> SRC_V\n if 'SRC_VERS' in hdr.keys():\n hdr.rename_keyword('SRC_VERS', 'SRC_V')", "def missing_header_fields():\n auth_token = get_auth_token()\n\n headers = '{\"Host\": \"$host\",\"Date\": \"DATE\",'\n headers += '\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Auth-Token\": \"$token\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, token=auth_token)", "def get_header(header_row):\n header = {}\n header['station'], c1, c2, c3, date, time, tz = header_row.split()\n header['short_model'] = c1\n header['model'] = f'{c1} {c2} {c3}' \n header['runtime'] = dateutil.parser.parse(f'{date} {time} {tz}')\n return header", "def _split_headers(headers):\n amz_headers = {}\n reg_headers = {}\n for cur in headers:\n if cur.lower().startswith('x-amz-'):\n amz_headers[cur] = headers[cur]\n else:\n reg_headers[cur] = headers[cur]\n return (amz_headers, reg_headers)", "def headers(self, v):\n raise NotImplementedError", "def encode_meta_headers(headers):\n ret = {}\n for header, value in headers.items():\n value = encode_utf8(value)\n header = header.lower()\n\n if (isinstance(header, str) and\n header.startswith(USER_METADATA_TYPE)):\n header = 
encode_utf8(header)\n\n ret[header] = value\n return ret", "def check_headers(self, headers):\n h = headers.values()[0]\n\n if 'DT' in PAR:\n if h.dt != PAR.DT:\n h.dt = PAR.DT\n\n if 'NT' in PAR:\n if h.nt != PAR.NT:\n print 'Warning: h.nt != PAR.NT'\n\n if 'NREC' in PAR:\n if h.nr != PAR.NREC:\n print 'Warning: h.nr != PAR.NREC'\n\n return h", "def set_column_headers(self, headers):\n if isinstance(self.columns.idx[0], int):\n self.data = [sorted(headers)] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment\n\n elif isinstance(self.columns.idx[0], str):\n datum = {}\n for i, key in enumerate(self.columns.idx):\n datum.update({key: headers[i]})\n self.data = [datum] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment", "def set_headers(self, headers):\n\n if isinstance(headers, dict):\n headers = headers.items()\n\n # NOTE(kgriffs): We can't use dict.update because we have to\n # normalize the header names.\n _headers = self._headers\n for name, value in headers:\n _headers[name.lower()] = value", "def scrub_headers(self, header_dict):\n return self.__headers_scrubber(header_dict)", "def setup_request_headers(self, uri: str = None) -> dict:\n\n uri = uri if uri is not None else self.host\n headers = {\n \"Accept\": \"application/atom+json,application/json\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en_US\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": self.user_agent,\n \"Referer\": uri + \"/suite/tempo/\",\n \"X-Appian-Cached-Datatypes\": self.datatype_cache.get(),\n \"Cookie\": \"JSESSIONID={}; __appianCsrfToken={}; __appianMultipartCsrfToken={}\".format(\n self.client.cookies.get(\"JSESSIONID\", \"\"),\n self.client.cookies.get(\"__appianCsrfToken\", \"\"),\n self.client.cookies.get(\"__appianMultipartCsrfToken\", \"\"),\n ),\n \"DNT\": \"1\",\n \"X-APPIAN-CSRF-TOKEN\": self.client.cookies.get(\"__appianCsrfToken\", \"\"),\n \"X-APPIAN-MP-CSRF-TOKEN\": self.client.cookies.get(\"__appianMultipartCsrfToken\", \"\"),\n \"X-Appian-Ui-State\": \"stateful\",\n \"X-Appian-Features\": self.client.feature_flag,\n \"X-Appian-Features-Extended\": self.client.feature_flag_extended,\n \"x-libraries-suppress-www-authenticate\": \"true\",\n # this should probably go...\n \"X-Atom-Content-Type\": \"application/html\"\n }\n return headers", "def build_headers(self):\n\n # User-agent is always sent\n headers = {'user-agent': self.useragent}\n for hdr in self.config.client_standard_headers:\n val = getattr(self.config, 'client_' + hdr.lower().replace('-','_'))\n headers[hdr] = val\n\n return headers", "def test_header_parser_vanilla(self):\n lines = [\"Content-Type: application/json\", \"Accept: application/json\"]\n h = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\n headers = parser._parse_headers(lines)\n self.assertEqual(h, headers)", "def _parse_raw_header_entries(header_entries):\n\n def __check_key(key):\n return not(\"_\" in key or \" \" in key or \":\" in key or not len(key))\n\n result = {}\n if (len(header_entries) < 1):\n return result\n\n # Remove leading '--'\n header_entries = header_entries[1:]\n if (not len(header_entries) % 2 == 0):\n raise ValueError(\"last key does not have a value\")\n\n while (len(header_entries)):\n # Retrieve raw key\n logging.debug(\"current header content \" + str(header_entries))\n word = header_entries[0]\n header_entries = header_entries[1:]\n\n # Try to trim equal\n if (word[-1] == ':'):\n word = word[:-1]\n\n 
if(not __check_key(word)):\n raise ValueError(\"invalid key '{}' in key value list\".format(word))\n\n result[word] = header_entries[0]\n header_entries = header_entries[1:]\n\n return result", "def _extract_metadata(self, header, cleaner):\n metadata = []\n for k, v in header.items():\n key = str(cleaner(k)) # clean key and ensure it is a string\n val = str(cleaner(v)) # clean value and ensure it is a string\n if (key and val):\n metadata.append(Metadatum(key, val))\n return metadata", "def trim_headers(all_headers, relevant_headers=[\"From\", \"To\", \"Subject\", \"Date\"]):\n data = {}\n for header in all_headers:\n if header['name'] in relevant_headers:\n data[header['name']] = header['value']\n\n return data", "def get_headers(headers: HTTPHeaders) -> Mapping[str, List[str]]:\r\n return {header.lower(): headers.get_list(header) for header in headers.keys()}", "def _make_headers() -> CaseInsensitiveDict:\n headers = CaseInsensitiveDict()\n headers['Authorization'] = f'Token {os.environ[\"TOKEN\"]}'\n headers['Content-type'] = 'application/json'\n return headers", "def __get_headers(self, passed_headers: Dict) -> Dict:\n\n # User-Agent for HTTP request\n library_details = [\n f\"requests {requests.__version__}\",\n f\"python {platform.python_version()}\",\n f\"connector {self.__class__.__name__}\",\n ]\n library_details = \"; \".join(library_details)\n user_agent = f\"Infermedica-API-Python {__version__} ({library_details})\"\n\n headers = {\n \"Accept\": \"application/json\",\n \"User-Agent\": user_agent,\n \"App-Id\": self.app_id,\n \"App-Key\": self.app_key,\n }\n headers.update(self.default_headers)\n headers.update(passed_headers) # Make sure passed headers take precedence\n return headers", "def parse_header(header_lines):\n info = {}\n for line in header_lines:\n if line.startswith('Citation'):\n info['Citation'] = line.split()[-1].strip()\n elif ':' in line:\n try:\n field, value = map(strip,line.split(':',1))\n info[field] = value\n except ValueError:\n #no interesting header line\n continue\n else:\n continue\n return Info(info)", "def parse_header_dict(self, header_dict=None):\n if header_dict is not None:\n self.header_dict = header_dict\n\n assert isinstance(self.header_dict, dict)\n\n for key, value in self.header_dict.items():\n if \"wire\" in key:\n if key.find(\"n\") == 0:\n self.ex_length = float(value.split()[0])\n self.ex_azimuth = float(value.split()[1])\n elif key.find(\"e\") == 0:\n self.ey_length = float(value.split()[0])\n self.ey_azimuth = float(value.split()[1])\n elif \"system\" in key:\n self.box_id = value.split(\";\")[0].strip()\n self.mag_id = value.split(\";\")[1].strip()\n elif \"gps\" in key:\n gps_list = value.split()\n self.header_gps_stamp = MTime(\n dateutil.parser.parse(\n \" \".join(gps_list[0:2]), dayfirst=True\n )\n )\n self.header_gps_latitude = self._get_latitude(\n gps_list[2], gps_list[3]\n )\n self.header_gps_longitude = self._get_longitude(\n gps_list[4], gps_list[5]\n )\n self.header_gps_elevation = float(gps_list[6])\n elif \"run\" in key:\n self.run_id = value.replace('\"', \"\")\n else:\n setattr(self, key.replace(\" \", \"_\").replace(\"/\", \"_\"), value)", "def headersFromRawFile(self, rawFile: str, headers: Dict) -> None:\n dFile = open(os.path.join(self.dataPath, rawFile), \"r\", encoding=\"ISO-8859-1\")\n generalHeaderString = dFile.read(1000) # this should be long enough\n generalSplit = generalHeaderString.split()\n # read GENERAL HEADER\n generalHeader = {}\n generalHeader[\"recLength\"] = int(generalSplit[0])\n 
generalHeader[\"fileType\"] = generalSplit[1]\n generalHeader[\"wordLength\"] = int(generalSplit[2])\n generalHeader[\"version\"] = generalSplit[3]\n generalHeader[\"procId\"] = generalSplit[4]\n generalHeader[\"numCh\"] = int(generalSplit[5])\n generalHeader[\"totalRec\"] = int(generalSplit[6])\n generalHeader[\"firstEvent\"] = int(generalSplit[7])\n generalHeader[\"numEvent\"] = int(generalSplit[8])\n generalHeader[\"extend\"] = int(generalSplit[9])\n\n # read EVENT HEADER - there can be multiple of these, but normally only the one\n # Multiple events are largely deprecated. Only a single event is used\n eventHeaders = []\n fileSize = os.path.getsize(os.path.join(self.dataPath, rawFile))\n record = generalHeader[\"firstEvent\"]\n for ir in range(0, generalHeader[\"numEvent\"]):\n seekPt = (record - 1) * generalHeader[\"recLength\"]\n if not seekPt > fileSize:\n # seek from beginning of file\n dFile.seek(seekPt, 0)\n # read extra to make sure\n eventString = dFile.read(1000)\n eventSplit = eventString.split()\n eH = {}\n eH[\"start\"] = int(eventSplit[0])\n eH[\"startms\"] = int(eventSplit[1])\n eH[\"stop\"] = int(eventSplit[2])\n eH[\"stopms\"] = int(eventSplit[3])\n eH[\"cvalue1\"] = float(eventSplit[4])\n eH[\"cvalue2\"] = float(eventSplit[5])\n eH[\"cvalue3\"] = float(eventSplit[6])\n eH[\"EHInfile\"] = int(eventSplit[7])\n eH[\"nextEH\"] = int(eventSplit[8])\n eH[\"previousEH\"] = int(eventSplit[9])\n eH[\"numData\"] = int(eventSplit[10])\n eH[\"startData\"] = int(eventSplit[11])\n eH[\"extended\"] = int(eventSplit[12])\n eventHeaders.append(eH)\n if eH[\"nextEH\"] < generalHeader[\"totalRec\"]:\n record = eH[\"nextEH\"] # set to go to next eH\n else:\n break # otherwise break out of for loops\n # close the data file\n dFile.close()\n # now compare number of samples with that calculated previously\n if eventHeaders[0][\"numData\"] != headers[\"num_samples\"]:\n self.printWarning(\"Data file: {}\".format(dFile))\n self.printWarning(\n \"Number of samples in raw file header {} does not equal that calculated from data {}\".format(\n eventHeaders[0][\"numData\"], headers[\"num_samples\"]\n )\n )\n self.printWarning(\"Number of samples calculated from data will be used\")\n # set the byte offset for the file\n self.dataByteOffset[rawFile] = (\n eventHeaders[0][\"startData\"] - 1\n ) * generalHeader[\"recLength\"]\n self.recChannels[rawFile] = generalHeader[\"numCh\"]", "def _api_headers(self, previous_headers=None):\n if self._headers is None:\n auth_headers = {}\n token = self._token\n if token:\n auth_headers[\"Authorization\"] = f\"token {token}\"\n self._headers = auth_headers\n\n if previous_headers is not None:\n headers = self._headers.copy()\n for condition, key in (\n (\"If-Modified-Since\", \"Last-Modified\"),\n (\"If-None-Match\", \"ETag\"),\n ):\n try:\n headers[condition] = previous_headers[key]\n except KeyError:\n continue\n return headers\n\n return self._headers", "def _getHeaders(self, callName):\n return {\n 'X-EBAY-API-CALL-NAME': callName,\n 'X-EBAY-API-APP-NAME': settings.EBAY_API_APP_NAME,\n 'X-EBAY-API-CERT-NAME': settings.EBAY_API_CERT_NAME,\n 'X-EBAY-API-DEV-NAME': settings.EBAY_API_DEV_NAME,\n 'X-EBAY-API-SITEID': settings.EBAY_API_SITEID,\n 'X-EBAY-API-COMPATIBILITY-LEVEL': settings.EBAY_API_COMPATIBILITY_LEVEL\n }", "def unpack_header(header):\n header_values = {}\n for line in header.split('\\n'):\n tokens = line.split('=')\n if len(tokens) > 1:\n header_values[tokens[0].strip()] = tokens[1].split(';')[0].strip()\n return header_values", "def 
getAllHeaders():", "def fitsio_header_to_dict(hdr):\n d = {}\n for key in hdr.keys():\n if key != 'HISTORY' or key != \"COMMENT\":\n d[key.lower()] = hdr.get(key)\n return d", "def get_headers():\n if not headers:\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"Accept\"] = \"application/json\"\n headers[\"User-Agent\"] = constants.USER_AGENT\n headers[\"Authorization\"] = get_token(constants.AUTH_URL, cfg[\"key\"])\n\n return headers\n\n return headers", "def parse_header(self): # -> list[Unknown]:\n ...", "def to_header(self) -> Dict[str, str]:\n tracer_from_context = self.get_current_tracer()\n temp_headers: Dict[str, str] = {}\n if tracer_from_context is not None:\n ctx = tracer_from_context.span_context\n try:\n temp_headers = tracer_from_context.propagator.to_headers(ctx)\n except AttributeError:\n pass\n return temp_headers", "def _parse_header(self, line):\n if self._regex_helper.search_compiled(W._re_header, line):\n if not self.headers:\n for value in re.findall(W._re_header, line):\n self.headers.append(value[0])\n raise ParsingDone\n else:\n # Dictionary which is going to be appended to the returned list\n ret = dict()\n # List of entries\n _entries = list()\n # List of values in WHAT entry\n _what_entry = list()\n for value in re.findall(W._re_header, line):\n _entries.append(value[0])\n for what_index in range(len(self.headers) - 1, len(_entries)):\n _what_entry.append(_entries[what_index])\n _what_entry_string = ' '.join(_what_entry)\n for index in range(len(self.headers)):\n if index < len(self.headers) - 1:\n ret.update({self.headers[index]: _entries[index]})\n else:\n ret.update({self.headers[index]: _what_entry_string})\n self.current_ret['RESULT'].append(ret)\n raise ParsingDone", "def extract_object_metadata_from_headers(headers):\n meta_headers = {}\n for header, value in headers.items():\n header = header.title()\n\n if (header.startswith(\"X-Object-Meta-\") or\n header.startswith(\"X-Object-Sysmeta-\") or\n header in SPECIAL_OBJECT_METADATA_HEADERS):\n\n # do not let a client pass in ORIGINAL_MD5_HEADER\n if header not in (ORIGINAL_MD5_HEADER,\n ORIGINAL_MD5_HEADER.title()):\n meta_headers[header] = value\n\n return meta_headers", "def _parse_header(lines):\n # The dict into which we will store header fields.\n header = {}\n # Loop over lines in the header.\n for line in lines:\n # Find the first colon.\n index = line.index(COLON)\n # Up to the colon is the field name.\n name = line[: index]\n # After the colon is the field value.\n value = line[index + 1 :]\n # The field value may begin or end with extra space, which is not \n # significant. Remove it.\n value = value.strip()\n # Store the field.\n header[name] = value\n # All done.\n return header", "def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self.headers:\n headers.update(self.headers)\n headers.update(kwargs)\n return headers", "def _get_cleaned_headers(headers):\r\n cleaned_headers = []\r\n for header in headers:\r\n # Google strips special characters, whitespace, and underscores first,\r\n # and then strips any *leading* digits. This order is extremely\r\n # important!\r\n sanitized = sub(r'^\\d+', '', sub(r'[\\W_]', '', header.lower()))\r\n if len(sanitized) > 0:\r\n cleaned_headers.append(sanitized)\r\n else:\r\n raise GoogleSpreadsheetError(\"Encountered a header '%s' that was \"\r\n \"either blank or consisted only of special characters. \"\r\n \"Could not map the header to the internal representation \"\r\n \"used by the Google Spreadsheet. 
Please change the header \"\r\n \"to consist of at least one alphanumeric character.\"\r\n % header)\r\n\r\n # When the same sanitized header appears multiple times in the first row\r\n # of a spreadsheet, _n is appended to the name to make it unique.\r\n header_count = defaultdict(int)\r\n results = []\r\n\r\n for header, cleaned_header in zip(headers, cleaned_headers):\r\n new_header = cleaned_header\r\n\r\n if header_count[cleaned_header] > 0:\r\n # Google's numbering starts from _2, hence the +1.\r\n new_header = '%s_%d' % (cleaned_header,\r\n header_count[cleaned_header] + 1)\r\n\r\n header_count[cleaned_header] += 1\r\n results.append(new_header)\r\n\r\n return results", "def test_headerFormatter(self):\n cases = [\n ({'Header1': 'Value1', 'Header2': 'Value2'},\n b'Header2: Value2\\r\\nHeader1: Value1\\r\\n'),\n ]\n\n for (input, expected) in cases:\n output = imap4._formatHeaders(input)\n self.assertEqual(sorted(output.splitlines(True)),\n sorted(expected.splitlines(True)))", "def get_headers(self):\r\n raise NotImplementedError", "def headers(self) -> Mapping[str, str]:\n return pulumi.get(self, \"headers\")", "def headers_processor(headers):\n def apply_headers(row_set, row):\n _row = []\n pairs = izip_longest(row, headers)\n for i, (cell, header) in enumerate(pairs):\n if cell is None:\n cell = Cell(None)\n cell.column = header\n if not cell.column:\n cell.column = \"column_%d\" % i\n cell.column_autogenerated = True\n _row.append(cell)\n return _row\n return apply_headers", "def _headers(self) -> Mapping[str, str]:\n return self.auth.headers() if self.auth else {}", "def process_header_request(self, request, http_s_obj):\n response_dict = {}\n data = request.split(\"\\r\\n\\r\\n\")\n header_info = data[0].split(\"\\r\\n\")\n headers = self.updateheader(header_info, http_s_obj)\n response_dict.update({'type': header_info[0].split()[0]})\n response_dict.update({'headers': headers})\n body = data[1]\n response_dict.update({'data': body})\n path = header_info[0].split()[1]\n if path.find('?') != -1:\n split_sym = '?'\n if path.find('&') != -1:\n split_sym = '&'\n try:\n req = path.split(split_sym)\n path = req[0]\n query = req[1]\n except Exception as e:\n query = ''\n response_dict.update({'path': path})\n response_dict.update({'query': query})\n\n return response_dict", "def _update_headers(self):\n if not self._header_updated:\n headers = self.head_obj(self._client, self._spec)\n self._headers.update(headers)\n self._header_updated = True", "def header(self, header, default=None):\n result = []\n header_value = self.email.get(header, default)\n if header_value:\n for part in decode_header(header_value):\n if part[1]:\n encoded = part[0].decode(part[1])\n elif isinstance(part[0], bytes):\n encoded = part[0].decode('utf-8')\n else:\n encoded = part[0]\n result.append(encoded.strip())\n header_value = ' '.join(result)\n\n return header_value", "def process_client_headers(self, *, scope: Scope, headers: Headers) -> Headerlike:\n return headers", "def set_column_headers(self,param_headers):\n self.cur_quotes_parm_headers = param_headers", "def _headers(self):\n\n auth_token = SendbeeAuth(self.client.api_secret).get_auth_token()\n headers = {\n 'X-Auth-Token': auth_token,\n 'X-Api-Key': self.client.api_key,\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'User-Agent': 'Sendbee Python API Client'\n }\n self.debug.ok('headers', headers)\n\n return headers", "def inject_headers(mime, headers):\n for key in headers:\n if key == 'Bcc' or key == 'Resent-Bcc':\n 
continue\n del mime[key]\n mime[key] = encode_header(headers[key])", "def get_headers(self, environ=None):\n return [('Content-Type', 'application/json')]", "def _build_headers(self):\n headers = {\n 'Authorization': 'Bearer {api_key}'.format(api_key=self._api_key),\n 'SplitSDKVersion': SDK_VERSION,\n 'Accept-Encoding': 'gzip'\n }\n\n if self._split_sdk_machine_name is not None:\n headers['SplitSDKMachineName'] = self._split_sdk_machine_name() \\\n if callable(self._split_sdk_machine_name) else self._split_sdk_machine_name\n\n if self._split_sdk_machine_ip is not None:\n headers['SplitSDKMachineIP'] = self._split_sdk_machine_ip() \\\n if callable(self._split_sdk_machine_ip) else self._split_sdk_machine_ip\n\n return headers", "def _build_http_header(self) -> Dict[str, str]:\n return {}", "def _get_headers(self, request):\n headers = {}\n for key, value in request.META.items():\n if key.startswith('HTTP_') and key != 'HTTP_HOST':\n headers[key[5:].replace('_', '-')] = value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH') and value:\n headers[key.replace('_', '-')] = value\n\n if request.user:\n headers['PARTNER-EMAIL'] = request.user.email\n lang = self._get_lang(request)\n if lang:\n headers['ACCEPT-LANGUAGE'] = lang\n return headers", "def parse_header(self):\n bodystart=re.compile(r\"<body>\", re.IGNORECASE).search(self.lines).span()[1]\n oldheader=self.lines[0:bodystart]\n start=re.compile(\"<title>\", re.IGNORECASE).search(oldheader).span()[1]\n finish=re.compile(\"</title>\", re.IGNORECASE).search(oldheader).span()[0]\n titles=oldheader[start:finish].split(\"--\")\n # Duplicate if needed\n if len(titles)==1: titles.append(titles[0])\n self.title, self.header= {}, {}\n for i, lang in enumerate(LANGS):\n self.title[lang]=titles[i]\n self.header[lang]=\"%s%s%s\" % (oldheader[:start], self.title[lang], oldheader[finish:],)", "def _format_header(self):\n return self._format_dict(self.header)", "def add_custom_headers(self, headers):\n headers_to_remove = [x for x in headers if x.lower() in [y.lower() for y in self.headers]]\n for header in headers_to_remove:\n headers.pop(header, None)\n headers.update(self.headers)", "def process_result_headers(result_headers):\n class_attrib_re = re.compile(r'\\s*class=\"([^\"]+)\"')\n\n for header in result_headers:\n m = class_attrib_re.match(header['class_attrib'])\n\n if m:\n class_value = m.groups(1)[0]\n else:\n class_value = ''\n\n if class_value != 'action-checkbox-column':\n class_value = 'has-label %s' % class_value\n\n header['class_attrib'] = \\\n mark_safe(' class=\"datagrid-header %s\"' % class_value)\n\n if header['sortable'] and header['sort_priority'] > 0:\n if header['ascending']:\n sort_order = 'asc'\n else:\n sort_order = 'desc'\n\n if header['sort_priority'] == 1:\n sort_priority = 'primary'\n else:\n sort_priority = 'secondary'\n\n header['sort_icon'] = 'datagrid-icon-sort-%s-%s' % (\n sort_order, sort_priority)\n\n return ''", "def _format_header(fields):\n # The list into which we will collect header lines.\n lines = []\n for name, value in fields.items():\n # A header line looks like, \"name: value\".\n line = name + COLON + SPACE + value\n # Add this line to the list.\n lines.append(line)\n return lines", "def put_headers_in_environ(headers, environ):\n for key, value in headers:\n environ['HTTP_%s' % key.upper().replace('-', '_')] = value", "def __get_headers(self):\n\n return {}", "def sanitize_headers(headers):\n auth_header = headers.pop(\"Authorization\", None)\n if auth_header:\n _logger.warning(\n f\"Possible fraud: 
Authorization header was set to {auth_header}\"\n )\n userinfo_header = headers.pop(\"X-Userinfo\", None)\n if userinfo_header:\n _logger.warning(\n f\"Possible fraud: X-Userinfo header was set to {userinfo_header}\"\n )", "def headers():\n return {\n 'user-agent': 'integration-tester',\n 'content-type': 'application/json',\n }", "def make_headers(self):\n return {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US;\\\n rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}", "def currentSBHeaders(self):\n headers = {}\n for header_name in [options['Headers','classification_header_name'],\n options['Headers','mailid_header_name'],\n options['Headers','classification_header_name'] + \"-ID\",\n options['Headers','thermostat_header_name'],\n options['Headers','evidence_header_name'],\n options['Headers','score_header_name'],\n options['Headers','trained_header_name'],\n ]:\n value = self[header_name]\n if value is not None:\n headers[header_name] = value\n return headers", "def get_headers(s, sep=': ', strip_cookie=False, strip_cl=True, strip_headers: list = []) -> dict():\n d = dict()\n for kv in s.split('\\n'):\n kv = kv.strip()\n if kv and sep in kv:\n v=''\n k = kv.split(sep)[0]\n if len(kv.split(sep)) == 1:\n v = ''\n else:\n v = kv.split(sep)[1]\n if v == '\\'\\'':\n v =''\n # v = kv.split(sep)[1]\n if strip_cookie and k.lower() == 'cookie': continue\n if strip_cl and k.lower() == 'content-length': continue\n if k in strip_headers: continue\n d[k] = v\n return d", "def get_headers(s, sep=': ', strip_cookie=False, strip_cl=True, strip_headers: list = []) -> dict():\n d = dict()\n for kv in s.split('\\n'):\n kv = kv.strip()\n if kv and sep in kv:\n v=''\n k = kv.split(sep)[0]\n if len(kv.split(sep)) == 1:\n v = ''\n else:\n v = kv.split(sep)[1]\n if v == '\\'\\'':\n v =''\n # v = kv.split(sep)[1]\n if strip_cookie and k.lower() == 'cookie': continue\n if strip_cl and k.lower() == 'content-length': continue\n if k in strip_headers: continue\n d[k] = v\n return d", "def responseheaders(self, flow: mitmproxy.http.HTTPFlow):", "def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self._token:\n headers['X-Plex-Token'] = self._token\n headers.update(kwargs)\n return headers", "def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self._token:\n headers['X-Plex-Token'] = self._token\n headers.update(kwargs)\n return headers", "def from_incoming_header(cls, header):\n # type: (Optional[str]) -> Baggage\n sentry_items = {}\n third_party_items = \"\"\n mutable = True\n\n if header:\n for item in header.split(\",\"):\n if \"=\" not in item:\n continue\n\n with capture_internal_exceptions():\n item = item.strip()\n key, val = item.split(\"=\")\n if Baggage.SENTRY_PREFIX_REGEX.match(key):\n baggage_key = unquote(key.split(\"-\")[1])\n sentry_items[baggage_key] = unquote(val)\n mutable = False\n else:\n third_party_items += (\",\" if third_party_items else \"\") + item\n\n return Baggage(sentry_items, third_party_items, mutable)", "def requestheaders(self, flow: mitmproxy.http.HTTPFlow):", "def headers(self):\n return Dict(**self._get_headers())", "def parse_cookies( headers ):", "def generate_headers(self):\n raise NotImplementedError()", "def headers(self):\r\n return dict(**self._get_headers())", "def extract_from_header(headers, header_type):\n LOG.debug(f\"[extract_from_header] extracting `{header_type}` from headers: {headers}\")\n if headers is None:\n return\n for header in headers:\n if header_type in header:\n for item in header:\n if item == header_type or item is 
None:\n continue\n else:\n return item.decode(\"ascii\")\n return" ]
[ "0.6760923", "0.6707738", "0.66021603", "0.6435935", "0.62480223", "0.62002486", "0.6085462", "0.60607", "0.605833", "0.6038865", "0.5981734", "0.5961042", "0.5954765", "0.59422773", "0.5939061", "0.5936525", "0.5867014", "0.5862857", "0.5862484", "0.586054", "0.5848212", "0.57505476", "0.57465124", "0.57360345", "0.5724249", "0.5721163", "0.57163894", "0.57017416", "0.57010096", "0.56939006", "0.5686147", "0.5672436", "0.566871", "0.5662251", "0.56598806", "0.5645679", "0.564502", "0.560429", "0.5604171", "0.5601218", "0.5600892", "0.55955595", "0.5591944", "0.55608284", "0.55254924", "0.5510758", "0.55077505", "0.54987484", "0.54953444", "0.5478886", "0.54753315", "0.54685664", "0.54584146", "0.5457679", "0.54543036", "0.54363126", "0.5431733", "0.543093", "0.54258955", "0.5421413", "0.5418677", "0.5413127", "0.54117227", "0.5410222", "0.54073024", "0.53905004", "0.53892463", "0.5381245", "0.5368328", "0.5360434", "0.5358165", "0.53565854", "0.53419673", "0.5338328", "0.53236246", "0.53212434", "0.53082466", "0.5306761", "0.5304595", "0.5296047", "0.52953863", "0.5282326", "0.5277068", "0.5268837", "0.52661616", "0.5258837", "0.5254299", "0.5248292", "0.524247", "0.524247", "0.52399015", "0.523759", "0.523759", "0.5235847", "0.5233139", "0.5226221", "0.5222904", "0.521974", "0.5215237", "0.5215113" ]
0.7734185
0
Calculate the similarity between two CTRDMs based on Cosine Similarity
def cosinesimilarity_cal(CTRDM1, CTRDM2):

    # get number of conditions
    n_cons = np.shape(CTRDM1)[0]

    # calculate the number of values above the diagonal in RDM
    n = n_cons * (n_cons - 1)

    # initialize two vectors to store the values above the diagonal of two RDMs
    v1 = np.zeros([n], dtype=np.float64)
    v2 = np.zeros([n], dtype=np.float64)

    # assignment
    nn = 0
    for i in range(n_cons):
        for j in range(n_cons):
            if i != j:
                v1[nn] = CTRDM1[i, j]
                v2[nn] = CTRDM2[i, j]
                nn = nn + 1

    # calculate the Cosine Similarity
    V1 = np.mat(v1)
    V2 = np.mat(v2)
    num = float(V1 * V2.T)
    denom = np.linalg.norm(V1) * np.linalg.norm(V2)
    cos = num / denom
    similarity = 0.5 + 0.5 * cos

    return similarity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cosine_similarity(v1, v2):\n # Cosine Sim:\n # Get the words that both have in common\n\n v1words = set(v1.keys())\n v2words = set(v2.keys())\n\n numerator_words = v1words.intersection(v2words)\n\n # Multiply and sum those counts\n numerator = 0.0\n for word in numerator_words:\n numerator += v1[word] * v2[word]\n\n\n # Divide by the sqrt of the product of the sum of the squares of the counts\n denominator = math.sqrt(math.magnitude(list(v1.values())) * math.magnitude(list(v2.values())))\n\n return numerator/denominator", "def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def cosineSimilarity(dict1,dict2):\n product1 = 0.0\n product2 = 0.0\n for key in dict1.keys():\n product1 += (dict1[key] * dict1[key])\n for key in dict2.keys():\n product2 += (dict2[key] * dict2[key])\n product1 = math.sqrt(product1)\n product2 = math.sqrt(product2)\n fenmu = product1 * product2\n fenzi = 0.0\n for key in dict1.keys():\n if key in dict2:\n fenzi += (dict1[key] * dict2[key])\n cosSim = fenzi / fenmu\n return cosSim", "def get_cosine_sim(self):\r\n return CosineSimilarity().calculate_similarity(self.tweets)", "def get_cosine_similarity(doc1, doc2):\n count_vectorizer = CountVectorizer(stop_words='english')\n sparse_matrix = count_vectorizer.fit_transform(raw_documents=[doc1, doc2])\n dtm = sparse_matrix.todense()\n df_dtm = pd.DataFrame(data=dtm, \n columns=count_vectorizer.get_feature_names(), \n index=['doc1', 'doc2'])\n similarity_matrix = cosine_similarity(df_dtm, df_dtm)\n similarity_score = round(similarity_matrix[0][1], 6)\n return similarity_score", "def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]", "def cosin_sim_pairs(a, b):\n wordsA = set(a.keys())\n wordsB = set(b.keys())\n inter = wordsA.intersection(wordsB)\n if(len(inter) == 0):\n return 0.0\n aa, bb, ab = 0, 0, 0\n for k in inter:\n aa += a[k] ** 2\n bb += b[k] ** 2\n ab += a[k] * b[k]\n for k in wordsA - inter:\n aa += a[k] ** 2\n for k in wordsB - inter:\n bb += b[k] ** 2\n return ab / float(math.sqrt(aa) * math.sqrt(bb))", "def cosine_similarity(v1, v2):\n sim = np.sum(v1*v2)/np.sqrt(np.sum(v1**2))/np.sqrt(np.sum(v2**2))\n return sim", "def cosine_similarity(a, b):\n cs = dot_product(a, b)/(norm(a) * norm(b))\n return cs", "def cosine_similarity(v1: Vector, v2: Vector) -> float:\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))", "def calculate_cosine_similarity(self):\n tfidf_matrix = self.calculate_tfidf()\n\n cosine_similarity = linear_kernel(tfidf_matrix, tfidf_matrix) # Cosine similarity matrix calculation\n\n return cosine_similarity", "def cosine_similarity(vec1, vec2) -> float:\n return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))", "def compute_cosine_similarity(self):\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])", "def cosine_similarity(v1, v2):\n v1_len = 0\n v2_len = 0\n dot_product = 0\n\n for context_id, count in v1.items():\n v1_len += count ** 2\n if context_id in v2:\n dot_product += count*v2[context_id]\n for count in v2.values():\n v2_len += count ** 2\n\n v1_len = math.sqrt(v1_len)\n v2_len = math.sqrt(v2_len)\n return dot_product/(v1_len * v2_len)", "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, 
dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def compute_cosine_sim(vec1, vec2):\n numer = np.dot(vec1.reshape((300,)), vec2.reshape((300,)))\n denom = np.sqrt(np.sum(np.square(vec1.reshape(300, )))) * np.sqrt(\n np.sum(np.square(vec2.reshape(300, ))))\n\n similarity = numer / denom\n\n return similarity", "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\r\n w12 = torch.sum(x1 * x2, dim)\r\n w1 = torch.norm(x1, 2, dim)\r\n w2 = torch.norm(x2, 2, dim)\r\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))", "def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))", "def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def test_cossim(self):\n metrics = SimilarityMetrics()\n test1 = metrics.cosine_similarity(np.asarray([1,1]),np.asarray([-1,1]))\n np.testing.assert_almost_equal(test1,0.0)\n\n test2 = metrics.cosine_similarity(np.asarray([1,-1]),np.asarray([-1,1]))\n np.testing.assert_almost_equal(test2,-1.0)\n\n test3 = metrics.cosine_similarity(np.asarray([1,1]),np.asarray([1,1]))\n np.testing.assert_almost_equal(test3,1.0)", "def cosine_dist(d1, d2):\n suma=0\n for x in d1:\n if x in d2:\n suma+=(d1[x]*d2[x])\n sqrt1=0\n sqrt2=0\n for i in d1:\n sqrt1+=math.pow(d1[i],2)\n for i in d2:\n sqrt2+=math.pow(d2[i],2)\n return 1-suma/(math.sqrt(sqrt1)*math.sqrt(sqrt2))", "def cosine_similarity_tensorflow(tf_word_representation_A, tf_words_representation_B):\n a_normalized = tf.nn.l2_normalize(tf_word_representation_A, axis=-1)\n b_normalized = tf.nn.l2_normalize(tf_words_representation_B, axis=-1)\n similarity = tf.reduce_sum(\n tf.multiply(a_normalized, b_normalized), \n axis=-1\n )\n \n return similarity", "def pairwise_cosine_similarity(x, y):\n x = torch.div(x, torch.sqrt(torch.max(torch.sum(x ** 2), 1e-12)))\n y = torch.div(y, torch.sqrt(torch.max(torch.sum(y ** 2), 1e-12)))\n return torch.mm(x, torch.transpose(y, 1, 0))", "def content_similarity(self, movie1, movie2):\n v1, v2 = self.get_tfidf(movie1), self.get_tfidf(movie2)\n return self.cosine_similarity(v1, v2)", "def test_cosine_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = cosine_similarity(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def cosine_similarity(a, b):\n\n numerator = tf.reduce_sum(tf.multiply(a, b), axis=1)\n denominator = tf.multiply(tf.norm(a, axis=1), tf.norm(b, axis=1))\n cos_similarity = numerator/denominator\n return cos_similarity", "def CosineSimilarity(test_vec, source_vecs):\n cos_dist = 0\n for source_vec in source_vecs:\n cos_dist += 
FacePredictor.findCosineDistance(test_vec, source_vec)\n return cos_dist / len(source_vecs)", "def getGloveoCosineSimilarity(question1, question2):\n questions = [question1, question2]\n\n ## for the sentences we need to get the count vectors\n vec = CountVectorizer(max_features=5000, stop_words=None,binary=True)\n count_vectors = vec.fit_transform(questions)\n\n ## get the vocabulary of words from the questions\n vocab_index = vec.vocabulary_\n\n ## get the index of the words and embeddings\n index_word = {v:k for k, v in vocab_index.items()}\n\n ## get the question vectors\n question_vectors = np.zeros((count_vectors.shape[0], 300))\n\n ## iterate through count vectors for each word get the embeddings\n ## for each embedding, we will then average by the number of words\n ## this will be then used for cosine similarity\n for i in range(count_vectors.shape[0]):\n row = count_vectors[i, :].toarray()\n word_ids = np.where(row > 0)[1]\n word_counts = row[:, word_ids][0]\n numWords = np.sum(word_counts)\n\n ## if there are no words, continue\n if numWords == 0:\n continue\n\n ## initialize the word embeddings to 0\n word_embeddings = np.zeros((word_ids.shape[0], 300))\n\n ## update the word embeddings\n for j in range(word_ids.shape[0]):\n word_id = word_ids[j]\n word_embeddings[j, :] = word_counts[j] * gloveDict[index_word[word_id]]\n question_vectors[i, :] = np.sum(word_embeddings, axis=0) / numWords\n\n return(cosine_similarity(question_vectors[0], question_vectors[1])[0][0])", "def tf_cosine_distance(self, a, b):\n normalize_a = tf.nn.l2_normalize(a, -1)\n normalize_b = tf.nn.l2_normalize(b, -1)\n cos_similarity = tf.reduce_sum(\n tf.multiply(normalize_a, normalize_b), axis=-1, keep_dims=True\n )\n return (1.0 - cos_similarity) / 2.0", "def calculate_similarity(self, tweets):\r\n if (len(tweets) == 1):\r\n return 0\r\n vectors = self.vectorizer.vectorize_data(tweets, False)\r\n\r\n temp = cosine_similarity(vectors[0:-1], vectors)\r\n temp = [item for sublist in temp for item in sublist]\r\n sim = sum(temp) / len(temp)\r\n return sim", "def calculate_cosine_dist(main_text, new_text):\n wordbag = set(\" \".join([main_text, new_text]).split(\" \"))\n dot_prod = 0\n main_text = main_text.split(\" \")\n new_text = new_text.split(\" \")\n\n for word in wordbag:\n if word in main_text and word in new_text:\n # only worth looking at if word is in both. 
Otherwise dot prod = 0\n count_A = sum(np.array(main_text) == word)\n count_B = sum(np.array(new_text) == word)\n dot_prod += count_A * count_B\n\n return float(dot_prod) / (len(main_text) * len(new_text))", "def get_similarity(self, ):\r\n customer_cos_similarity = cosine_similarity(self.rating_matrix, self.rating_matrix)\r\n customer_cos_similarity = pd.DataFrame(customer_cos_similarity,\r\n index=self.customer_vendor_matrix.index,\r\n columns=self.customer_vendor_matrix.index)\r\n # customer_pearson_similarity = np.corrcoef(self.rating_matrix,\r\n # self.rating_matrix,)\r\n # customer_pearson_similarity = pd.DataFrame(customer_pearson_similarity,\r\n # index=self.customer_vendor_matrix.index,\r\n # columns=self.customer_vendor_matrix.index)\r\n return customer_cos_similarity,\r\n # return customer_pearson_similarity run too slowly\r", "def _do_action_calculate_similarity_cosine_express(self):\n self._run_express_job(\"com.directv.recommend.express.CosineCFTrainer\")\n self._scan_table(\"content/item_item_similarities\")", "def cosine_sim_counters(a, b):\n union_ab = sorted((a | b).keys())\n veca = np.array([a[element] if element in a else 0 for element in union_ab])\n vecb = np.array([b[element] if element in b else 0 for element in union_ab])\n return np.dot(veca, vecb) / (np.linalg.norm(veca) * np.linalg.norm(vecb))", "def test_cosine_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity.py_func(vector1, vector1)\n score12 = cosine_similarity.py_func(vector1, vector2)\n score22 = cosine_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def compute_cosine_sim(vec1, vec2):\r\n\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.dot(vec1, vec2)/(norm(vec1) * norm(vec2))", "def get_cosine_similarity(word2vec: Word2Vec) -> np.ndarray:\n return cosine_similarity(word2vec.wv.vectors)", "def cosine_similarity(a, b):\n if a.ndim != 1 or b.ndim != 1:\n raise InvalidShapeException(a,b)\n\n if len(a) != len(b):\n raise InvalidLengthException(a,b)\n \n mag_a = np.linalg.norm(a)\n mag_b = np.linalg.norm(b)\n\n return np.dot(a,b)/(mag_a*mag_b)", "def similarity(self, wSet1, wSet2, idf): \n if len(wSet1) == 0 or len(wSet2) == 0:\n return 0.0\n else:\n defaultIDF = idf['unknownToken']\n intersection = wSet1.intersection(wSet2)\n# intersection = self.synonymIntersection(wSet1, wSet2, idf)\n if len(intersection) == 0:\n return 0\n sum1 = 0\n sum2 = 0\n intersectionSum = 0\n for word in wSet1:\n sum1 += (idf.get(word, defaultIDF))**2\n for word in wSet2:\n sum2 += (idf.get(word, defaultIDF))**2\n for word in intersection:\n intersectionSum += (idf.get(word, defaultIDF))**2\n \n if sum1 == 0 or sum2 == 0:\n return 0.0\n else:\n return intersectionSum/(math.sqrt(sum1) * math.sqrt(sum2))", "def w2v_sim(self, s1, s2):\n v1 = self.word2vec.get_centroid_vector(s1)\n v2 = self.word2vec.get_centroid_vector(s2)\n return self.__cos_sim(v1, v2)", "def similarity(centroid_a, centroid_b):\n \n vector_a = centroid_a.centroid_vector\n vector_b = centroid_b.centroid_vector\n \n length_a = centroid_a.length\n length_b = centroid_b.length\n \n dotproduct = 0.0\n\n for key, value in vector_a.iteritems():\n if key in vector_b: # if both vectors have the key\n dotproduct += (value * vector_b[key])\n\n return float(dotproduct / (length_a * length_b))", "def cosine_sim_collections(a, b):\n setab = sorted(set(a) | set(b))\n 
countera, counterb = Counter(a), Counter(b)\n veca = [countera[element] if element in a else 0 for element in setab]\n vecb = [counterb[element] if element in b else 0 for element in setab]\n return dot(veca, vecb) / (norm(veca) * norm(vecb))", "def get_similarity(df):\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"bag_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n return cosine_sim", "def cosine_similarity(vector_x, vector_y):\n if(len(vector_x)!=len(vector_y)):\n raise Exception('Vectors must be the same dimensions')\n \n return 1-np.dot(vector_x,vector_y)/(np.linalg.norm(vector_x)*np.linalg.norm(vector_y))", "def __modelSimilarity(self, mOrig: nn.Module, mDest: nn.Module) -> torch.Tensor:\n cos = nn.CosineSimilarity(0)\n d1 = nn.utils.parameters_to_vector(mOrig.parameters())\n d2 = nn.utils.parameters_to_vector(mDest.parameters())\n sim: torch.Tensor = cos(d1, d2)\n return sim", "def similarity_function_old(feature1, feature2):\n f1Magnitude = feature1.dot(feature1)\n f2Magnitude = feature2.dot(feature2)\n return 1 - feature1.dot(feature2) / (f1Magnitude * f2Magnitude)", "def getcorrelation(movieid1,movieid2):\n\n #the initialized integer, cosine_sum, has an initialized value of -100\n #such that in the case where correlation shouldn't be updated, the value\n #remains unchanged\n cosine_sum = NEGATIVE\n #variable r_a,i and r_b,i in the formula\n r_mv1 = 0\n r_mv2 = 0\n #numerator\n nume_sum = 0\n #two parts in the denominator (before taking square root)\n deno_mv1_sum = 0\n deno_mv2_sum = 0\n denominator = 0\n #variable that keeps track of count of common users\n currentCommon = 0\n\n #firstly check if the count of user passes the threshold for each movie\n if(len(dictMovie.get(movieid1))<threshold or\n len(dictMovie.get(movieid2))<threshold):\n #if either does not, returns a negative correlation (to be invalid)\n return cosine_sum\n #if both pass threshold, get the intersection (of users) of two movies\n else:\n intersect=dictMovie.get(movieid1).intersection(dictMovie.get(movieid2))\n #if the number of common users is smaller than threshold, return\n if (len(intersect) < threshold):\n return cosine_sum\n #otherwise, start counting correlation\n else:\n #get the average rating of two movies\n mv1_bar = float(dictMovieRate.get(movieid1))\n mv2_bar = float(dictMovieRate.get(movieid2))\n #iterate through common users and use formula\n for commonuser in intersect:\n #increment common user count\n currentCommon += 1\n r_mv1 = int(dictUser.get(commonuser).get(movieid1))\n r_mv2 = int(dictUser.get(commonuser).get(movieid2))\n nume_sum += ( (r_mv1)-mv1_bar )*( (r_mv2)-mv2_bar )\n deno_mv1_sum += ( (r_mv1)-mv1_bar )**2\n deno_mv2_sum += ( (r_mv2)-mv2_bar )**2\n #when done with denominator separate calculation, combine\n denominator = math.sqrt(deno_mv1_sum * deno_mv2_sum)\n #handle the case where denominator=0 (invalid)\n if denominator == 0:\n return cosine_sum\n #otherwise, successful. 
return valid values and pass in\n #common count to global variable for program to catch\n else:\n cosine_sum = nume_sum / denominator\n global currentCommonCount\n currentCommonCount = currentCommon\n return cosine_sum", "def cosine_sim(a: np.ndarray, \n b: np.ndarray \n ) -> float:\n return (\n 1 + a.dot(b) / \n (np.linalg.norm(a)*np.linalg.norm(b))\n ) / 2", "def __cos_sim(self, v1, v2):\n if np.count_nonzero(v1) == 0 or np.count_nonzero(v2) == 0:\n # whenever at least one of the vectors is all zeros, spatial.distance.cosine will fail by returning nan\n ret = 0\n else:\n ret = 1 - spatial.distance.cosine(v1, v2)\n return ret", "def similarity_function(feature1, feature2):\n # 256 HOG, 18 HSV, 512 Encoder\n # weight color more if using the full vector\n if len(feature1) > 785:\n salient1 = feature1[256:256 + 18].copy() # be careful not to modify feature vector in place\n salient2 = feature2[256:256 + 18].copy()\n feature1 = feature1.copy()\n feature2 = feature2.copy()\n feature1[256:256 + 18] = salient1 * 10\n feature2[256:256 + 18] = salient2 * 10\n\n abs_distance = np.abs(feature1 - feature2)\n return np.sum(abs_distance)", "def cos_sim(a, b):\n numerator, sosA, sosB = 0, 0, 0 #sum of squares for denom\n for i in movies: #replace movies\n numerator += (ratingai * ratingbi)\n sosA += ratingai * ratingai\n sosB += ratingbi * ratingbi\n denom = sqrt(sosA * sosB) #make sure denom won't be 0!\n return numerator / denom", "def cosine_similarity(u, v):\n\n distance = 0.0\n\n ### START CODE HERE ###\n # Compute the dot product between u and v (≈1 line)\n dot = np.dot(u, v)\n # Compute the L2 norm of u (≈1 line)\n norm_u = np.sqrt(np.dot(u, u))\n\n # Compute the L2 norm of v (≈1 line)\n norm_v = np.sqrt(np.dot(v, v)) ##np.linalg.norm(u)\n # Compute the cosine similarity defined by formula (1) (≈1 line)\n cosine_similarity = dot / (norm_u * norm_v)\n ### END CODE HERE ###\n\n return cosine_similarity", "def wordSimilarityRatio(sent_1,sent_2):", "def compute_similarity(self, text1, text2):\n\n text1_dist = self.predict(text1)[0]\n text2_dist = self.predict(text2)[0]\n return jensenshannon(text1_dist, text2_dist)", "def similarity_score(a,b):\n\tjsc_scaler = 15\n\tocs_scaler = 5\n\ttcss_scaler = 0.05\n\n\tjaccard_similarity_coefficient_score = jsc_scaler * jaccard_similarity_coefficient(a,b)\n\toverlap_coefficient_score = ocs_scaler * overlap_coefficient(a,b)\n\ttotal_char_similarity_score = tcss_scaler * total_char_similarity(a,b)\n\ttotal_score = jaccard_similarity_coefficient_score + overlap_coefficient_score + total_char_similarity_score\n\t\n\treturn total_score", "def similarity_score(a,b):\n\tjsc_scaler = 15\n\tocs_scaler = 5\n\ttcss_scaler = 0.05\n\n\tjaccard_similarity_coefficient_score = jsc_scaler * jaccard_similarity_coefficient(a,b)\n\toverlap_coefficient_score = ocs_scaler * overlap_coefficient(a,b)\n\ttotal_char_similarity_score = tcss_scaler * total_char_similarity(a,b)\n\ttotal_score = jaccard_similarity_coefficient_score + overlap_coefficient_score + total_char_similarity_score\n\t\n\treturn total_score", "def compute_cosine_similarity(base_vector, target_vector):\n\n np.seterr(all='print')\n cosine_similarity = 0\n\n try:\n base_vector = np.longdouble(base_vector)\n target_vector = np.longdouble(target_vector)\n vector_dot_products = np.dot(base_vector, target_vector)\n vector_norms = np.linalg.norm(base_vector) * np.linalg.norm(target_vector)\n cosine_similarity = np.divide(vector_dot_products, vector_norms)\n\n if vector_norms == 0.0:\n print 'Error in vec in 
compute_cosine_similarity'\n print target_vector\n\n except Exception, e:\n print(str(e))\n\n return cosine_similarity", "def similarity(self, token1, token2):\n vec1 = self.get_vector(token1)\n vec2 = self.get_vector(token2)\n assert vec1 is not None and vec2 is not None, \"Cannot compute similarity between None type vectors.\"\n if not self.normalize:\n # if model not loaded as normalized embeddings \n vec1 = vec1 / np.linalg.norm(vec1)\n vec2 = vec2 / np.linalg.norm(vec2)\n return np.dot(vec1, vec2)", "def cosine(arr1, arr2):\n\n if arr1 is None or arr2 is None:\n return np.NaN\n if not isinstance(arr1, list):\n arr1 = [arr1]\n if any(pd.isnull(arr1)):\n return np.NaN\n if not isinstance(arr2, list):\n arr2 = [arr2]\n if any(pd.isnull(arr2)):\n return np.NaN\n # Create cosine measure object\n measure = sm.Cosine()\n # Call the function to compute the cosine measure.\n return measure.get_raw_score(arr1, arr2)", "def similarity(self, word1: str, word2: str, metric='cosine') -> float:\n if 0 == self.word2idx.get(word1, 0) or 0 == self.word2idx.get(word2, 0):\n return 0.\n\n return self.similarity_vec(self[word1], self[word2], metric=metric)\n # vec1 = self.__getitem__(word1).reshape((1, -1))\n # vec2 = self.__getitem__(word2).reshape((1, -1))\n # return 1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1)", "def similarity(self, query, documents):\n\n bow_query = self.dictionary.doc2bow(query)\n bow_docs = [self.dictionary.doc2bow(document) for document in documents]\n\n index = SoftCosineSimilarity(bow_docs, self.matrix)\n similarities = index[bow_query]\n\n return similarities", "def _cosine_similarity_update(preds: Tensor, target: Tensor) ->Tuple[Tensor, Tensor]:\n _check_same_shape(preds, target)\n preds = preds.float()\n target = target.float()\n return preds, target", "def cos_sim(vec1, vec2):\n if len(vec1) != len(vec2):\n print 'dimension does not agree.'\n numerator_sum = 0 \n for i in range(len(vec1)):\n numerator_sum = numerator_sum + vec1[i]*vec2[i]\n \n denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)\n \n return numerator_sum/denom", "def get_similarity(user1: Rating, user2: Rating) -> float:\n shared = 0.0\n for m_id in user1:\n if m_id in user2:\n shared += user1[m_id] * user2[m_id]\n norm1 = 0.0\n for m_id in user1:\n norm1 = norm1 + user1[m_id] ** 2\n norm2 = 0.0\n for m_id in user2:\n norm2 = norm2 + user2[m_id] ** 2\n return (shared * shared) / (norm1 * norm2)", "def similarity_euclid(matrix, business1, business2):\n selected_features = matrix.loc[business1].notna() & matrix.loc[business2].notna()\n\n if not selected_features.any():\n return 0\n\n features1 = matrix.loc[business1][selected_features]\n features2 = matrix.loc[business2][selected_features]\n distance = math.sqrt(((features1 - features2) ** 2).sum())\n\n if distance is np.nan:\n return 0\n\n return 1 / (1 + distance)", "def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim", "def cosine_similarity(user, business):\n return len(user.intersection(business)) / (math.sqrt(len(user)) * math.sqrt(len(business)))", "def calculate_similarities(self) -> List[float]:\n sims = list()\n for i, r in self.sim_data.iterrows():\n if isinstance(self.model, FastTextWrapper):\n vecs = self.model.inference([r[\"Word 1\"], r[\"Word 2\"]])\n else:\n vecs = self.model.inference_batches([[r[\"Word 1\"]], [r[\"Word 2\"]]])\n vecs = [x[0] for x in vecs]\n if len(vecs) == 2:\n s = cosine_similarity([vecs[0]], [vecs[1]])[0][0]\n sims.append(s)\n else:\n sims.append(np.nan)\n 
self.sim_data[\"assigned_sim\"] = sims\n self.sim_data = self.sim_data.dropna()\n self.mean_error()\n self.correlation()\n return sims", "def cos_sim(v1, v2):\r\n return np.inner(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def findCosineDistance(vector1, vector2):\n vec1 = vector1.flatten()\n vec2 = vector2.flatten()\n\n a = np.dot(vec1.T, vec2)\n b = np.dot(vec1.T, vec1)\n c = np.dot(vec2.T, vec2)\n return 1 - (a / (np.sqrt(b) * np.sqrt(c)))", "def compute_similarity(site_a, site_b):\n return np.linalg.norm(site_a - site_b)", "def cossim(corpus):\n files = os.listdir()\n vectorizer = TfidfVectorizer()\n trsfm = vectorizer.fit_transform(corpus)\n columns = vectorizer.get_feature_names()\n df_tfidf = pd.DataFrame(trsfm.toarray(), columns = columns, index = corpus)\n out = cosine_similarity(trsfm)\n df_result = pd.DataFrame(out, columns = files, index = files)\n return df_result", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def main():\n\n measures = Similarity()\n\n input1=sys.argv[1]\n vect1=np.loadtxt(fname = input1)\n \n input2=sys.argv[2]\n vect2=np.loadtxt(fname = input2)\n\n print measures.cosine_similarity(normBySum(vect1), normBySum(vect2))\n \n\n\n #print measures.cosine_similarity2(vect1, vect2)\n\n #print measures.jaccard_similarity([0,1,2,5,6],[0,2,3,5,7,9])", "def get_cosine_similarities(keywords: List[str],\n matrices: Matrices,\n word2onehot: Dict[str, int]\n ) -> None:\n for i in combinations(keywords, 2):\n print(i[0], i[1], cosine_sim(\n matrices.embedding[\n word2onehot[i[0]]], matrices.embedding[word2onehot[i[1]]\n ]\n ))", "def predict_cosine_similarities(sess, word_A, words_B):\n\n word_A_id, _ = sentence_to_word_ids(word_A, word_to_index)\n words_B_ids, split_sentence = sentence_to_word_ids(words_B, word_to_index)\n\n evaluated_cos_similarities = sess.run(\n cosine_similarities, \n feed_dict={\n tf_word_A_id: word_A_id,\n tf_words_B_ids: words_B_ids\n }\n )\n return evaluated_cos_similarities, split_sentence", "def dependency_similarity(s1, s2):\n # pass\n parsed_sentence_1 = parser.raw_parse(s1)\n parsed_sentence_2 = parser.raw_parse(s2)\n \n tree1 = next(parsed_sentence_1)\n tree2 = next(parsed_sentence_2)\n \n triples1 = [t for t in tree1.triples()]\n triples2 = [t for t in tree2.triples()] \n\n # Compute similarity\n if len(triples1) != 0 and len(triples2) != 0:\n similarity = 1 - jaccard_distance(set(triples1), set(triples2))\n return similarity\n else:\n return 0", "def test_cosine_similarity_all_zeros_compiled():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = cosine_similarity(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def calculate_similarity(self, tfidf_matrix, test_tfidf):\n\n with open(DATASET.fold_root / 'tags_order.json') as file:\n tags_order = json.load(file)\n\n min_max_scaler = MinMaxScaler()\n\n n_clus = 2\n simis = []\n for test_q in test_tfidf:\n s = cosine_similarity(tfidf_matrix, test_q)\n\n # Sorting and getting indices of sorted similarities\n simi = s.transpose()[0]\n simi_values = np.sort(simi)[::-1][:200]\n simi_indices = simi.argsort()[::-1]\n\n breaks = jenkspy.jenks_breaks(simi_values, n_clus)\n simi_count = len(simi_values[breaks[-2] <= simi_values])\n\n q_tags = 
[self.train_set[i].tags for i in simi_indices][:simi_count]\n\n tags_votes = Counter(chain(*q_tags))\n all_count = sum(tags_votes.values())\n tags_likelihood = [tags_votes.get(\n tag, 0) / all_count for tag in tags_order]\n\n lh = np.array([float(x)\n for x in tags_likelihood]).reshape(-1, 1)\n normalized_lh = np.concatenate(\n min_max_scaler.fit_transform(lh)\n ).tolist()\n\n simis.append(normalized_lh)\n\n return simis", "def cosine_similarity(y_true, y_pred, axis=-1):\n y_true = nn.l2_normalize(y_true, axis=axis)\n y_pred = nn.l2_normalize(y_pred, axis=axis)\n return -math_ops.reduce_sum(y_true * y_pred, axis=axis)", "def get_cosine_similarity(self, query: list):\n question_vector = self.get_vector(query)\n\n return cosine_similarity(question_vector, self.vectors).flatten()", "def test_cosine_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 0],\n [1, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [0, 0, 1, 1]])\n\n scores = cosine_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.],\n [0.40824829, 0.81649658]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def cos_sim(v1: Union[np.ndarray, np.iterable, int, float], v2: Union[np.ndarray, np.iterable, int, float]) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def cosine_similarity(X):\n matrix = X.dot(X.transpose()).todense()\n mat_len = len(matrix)\n norms = [0] * mat_len\n for i in range(0, mat_len):\n norms[i] = 1.0 / np.sqrt(matrix.item((i, i)))\n norm_mat = np.matrix(norms)\n return np.multiply(norm_mat.transpose().dot(norm_mat), matrix)", "def cosine_similarity(document, cluster):\n num = dot(document.vector, cluster.centroid)\n den = linalg.norm(document.vector) * linalg.norm(cluster.centroid)\n\n return num / den", "def sentence_distance(sentence_a, sentence_b):\n \n sent_a = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_a+bigrams(sentence_a)+trigrams(sentence_a)], axis=0)\n sent_b = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_b+bigrams(sentence_b)+trigrams(sentence_b)], axis=0)\n \n \n return float(cosine(sent_a, sent_b))", "def similarity(self, word1, word2):\n common_vect = +np.ones(self.nEmbed) * 10000\n if word1 not in self.vocab and word2 in self.vocab:\n id_word_2 = self.w2id[word2]\n w1 = common_vect\n w2 = self.U[id_word_2]\n elif word1 in self.vocab and word2 not in self.vocab:\n id_word_1 = self.w2id[word1]\n w1 = self.U[id_word_1]\n w2 = common_vect\n elif word1 not in self.vocab and word2 not in self.vocab:\n w1 = common_vect\n w2 = common_vect\n else:\n id_word_1 = self.w2id[word1]\n id_word_2 = self.w2id[word2]\n w1 = self.U[id_word_1]\n w2 = self.U[id_word_2]\n\n # scalair = w1.dot(w2)/np.linalg.norm(w1,w2)\n similarity = w1.dot(w2) / (np.linalg.norm(w1) * np.linalg.norm(w2))\n # similarity = 1 / (1 + np.exp(-scalair))\n # similarity = scalair / (np.linalg.norm(w1) * np.linalg.norm(w2))\n return similarity", "def cosine_collection_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of `x1` and columns of `x2` transpose\n cos_thetas = tf.linalg.matmul(x1, x2, transpose_b=True)\n pairwise_distances = 1 - cos_thetas\n\n # deal with numerical inaccuracies setting small negatives to zero\n pairwise_distances = tf.maximum(pairwise_distances, 0.0)\n\n return pairwise_distances", "def cosine_sim(im, s):\n return im.mm(s.t()) 
#image.mm(sentence.t()) & mm() Performs a matrix multiplication of the matrices ", "def cosine_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of `x_1` and rows of `x_2`\n # \"ij,ij->i\" := output[i] = sum_j x1[i, j] * x2[i, j]\n cos_thetas = tf.linalg.einsum(\"ij,ij->i\", x1, x2)\n cos_distances = 1 - cos_thetas\n\n # deal with numerical inaccuracies setting small negatives to zero\n cos_distances = tf.maximum(cos_distances, 0.0)\n\n return cos_distances", "def cos_sim(com_feat,ref_feat):\n # Fill this in\n a = numpy.squeeze(com_feat)\n b = numpy.squeeze(ref_feat)\n return numpy.dot(a, b) / (numpy.linalg.norm(a) * numpy.linalg.norm(b))", "def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))", "def similarity(self, e1, e2):\n\t\tpass", "def get_dist(text1, text2, wv):\n t1 = lookup(text1, wv)\n t2 = lookup(text2, wv)\n dist = cos_sim(t1, t2)\n return dist", "def idf_modified_cosine(x, y, idf):\n result = 0\n try:\n tf_x = [dict([word, int(tf)] for word, tf in dict(\n np.array(np.unique(x, return_counts=True)).T).items())][0]\n tf_y = [dict([word, int(tf)] for word, tf in dict(\n np.array(np.unique(y, return_counts=True)).T).items())][0]\n result = sum([tf_x[w] * tf_y[w] * (idf[w]**2)\n\t\t for w in tf_x.keys() & tf_y.keys()]) / ((\n sum([(tf_x[w] * idf[w])**2\n for w in tf_x.keys()])**0.5) * (\n sum([(tf_y[w] * idf[w])**2\n for w in tf_y.keys()])**0.5))\n except:\n print(r'x:', x, r'y:', y)\n pass\n return result", "def test_cosine_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [1, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [0, 0, 1, 1]])\n\n scores = cosine_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.],\n [0.40824829, 0.81649658]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def _arccosine(self, s1, s2, tf_embs):\n tf_pi = tf.constant(np.pi, dtype=tf.float64)\n mat1 = tf.gather(tf_embs, s1)\n mat2 = tf.gather(tf_embs, s2)\n tf_norms = tf.constant(self.norms, dtype=tf.float64, name='norms')\n norms1 = tf.gather(tf_norms, s1)\n norms2 = tf.gather(tf_norms, s2)\n dot = tf.matmul(mat1, tf.transpose(mat2))\n norms = tf.matmul(norms1, tf.transpose(norms2))\n # We clip values due to numerical errors\n # which put some values outside the arccosine range.\n cosine = tf.clip_by_value(dot / norms, -1, 1)\n angle = tf.acos(cosine)\n # The 0 vector has norm 0, which generates a NaN.\n # We catch these NaNs and replace them with pi,\n # which ends up returning 0 similarity.\n angle = tf.select(tf.is_nan(angle), tf.ones_like(angle) * tf_pi, angle)\n return 1 - (angle / tf_pi)" ]
[ "0.7754487", "0.7676584", "0.7633492", "0.76006675", "0.7589376", "0.7576636", "0.75592244", "0.75373095", "0.7522885", "0.746666", "0.74491453", "0.7418762", "0.7398554", "0.73664916", "0.7334395", "0.7312988", "0.7309643", "0.7292841", "0.72754246", "0.7240708", "0.72226435", "0.7130961", "0.7123031", "0.70562273", "0.70501834", "0.70479953", "0.7040821", "0.7039914", "0.70334846", "0.70334363", "0.70153224", "0.7008114", "0.69522464", "0.6934286", "0.6928512", "0.6926593", "0.6879386", "0.6864113", "0.6863202", "0.68610185", "0.68464726", "0.6837238", "0.6828644", "0.68274385", "0.6794438", "0.67816436", "0.67760855", "0.675313", "0.6740118", "0.67280334", "0.67219496", "0.67148787", "0.67109823", "0.6707885", "0.6702007", "0.6698139", "0.66927254", "0.66927254", "0.66871613", "0.6682697", "0.6681088", "0.6679727", "0.6679382", "0.6677626", "0.6674533", "0.6638071", "0.6621222", "0.6613444", "0.6607373", "0.6603937", "0.660229", "0.6587089", "0.65855116", "0.65817654", "0.65788", "0.65788", "0.6578797", "0.6570616", "0.65550965", "0.6543188", "0.65381354", "0.6532197", "0.65292716", "0.65252316", "0.6522049", "0.65192133", "0.65176344", "0.6512081", "0.6507807", "0.6505985", "0.65048015", "0.650079", "0.649635", "0.64901906", "0.6489436", "0.6488032", "0.6485602", "0.6464009", "0.64575744", "0.6450351" ]
0.80601525
0
Adds basic_vector to the basic vectors. If there are at least 3 arrays in _basic_vectors, then adds a new array to _feature_vectors. This added array is composed of the basic vector and its first two central derivatives. basic_vector must be the array returned by the mfcc.
def build_feature_vector(self, basic_vector):
    basic_vector = basic_vector - np.mean(basic_vector)
    self._basic_vectors.append(basic_vector)
    if len(self._basic_vectors) > 2:
        # if there are at least 3 basic vectors we can calculate the central derivatives for the vector before this one
        first_derivative = (basic_vector - self._basic_vectors[-3]) / (2 * self.seconds_to_next_vector)
        second_derivative = (basic_vector - 2 * self._basic_vectors[-2] + self._basic_vectors[-3]) / (self.seconds_to_next_vector ** 2)
        feature_vector = np.concatenate((basic_vector, first_derivative, second_derivative))
        self._feature_vectors.append(feature_vector)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_vector(self, doc_name, add_cnt, new_docvec):\n \n # determine the weight of the merging pieces\n old_weight = float(self.vector_cnt) / (self.vector_cnt + add_cnt)\n new_weight = float(add_cnt) / (self.vector_cnt + add_cnt)\n \n if len(self.name) == 0:\n self.name = doc_name\n else:\n self.name += \", %s\" % doc_name\n \n # computes magnitude as it goes.\n self.length = 0\n \n # reduce weight of values already in vector\n for key in self.centroid_vector.keys():\n if key in new_docvec: # if is in both vectors!\n \n oldvalue = float(self.centroid_vector[key]) * old_weight\n newvalue = float(new_docvec[key]) * new_weight\n value = oldvalue + newvalue\n \n self.centroid_vector[key] = value\n self.length += (value * value) # magnitude\n \n # so when we go through to add in all the missing ones we won't \n # have excess.\n del new_docvec[key]\n else: # if it is strictly in the old vector\n \n oldvalue = float(self.centroid_vector[key]) * old_weight\n self.centroid_vector[key] = oldvalue\n self.length += (oldvalue * oldvalue) # magnitude\n \n # add new values to vector\n for key, value in new_docvec.iteritems():\n # we don't so we'll have to create a new value with the weight of \n # the added vector\n value = float(value) * new_weight\n self.centroid_vector[key] = value\n self.length += (value * value)\n\n self.vector_cnt += add_cnt\n\n # calculate magnitude\n self.length = sqrt(self.length)", "def feature_vector(features, vector):\n clean_features = set(features)\n new_features_vector = featurize(vector,clean_features)\n return new_features_vector", "def _add_support_vectors(self, x: np.ndarray, y: np.ndarray) -> None:\n\n n_vectors = x.shape[0]\n\n self.support_vectors = np.vstack([self.support_vectors, x])\n self.alpha = np.append(self.alpha, np.zeros(n_vectors))\n self.target = np.append(self.target, y)\n\n new_kernel_values = self._kernel(x, self.support_vectors)\n\n self.kernel_mx = np.vstack([self.kernel_mx, new_kernel_values[:, :-n_vectors]])\n self.kernel_mx = np.hstack([self.kernel_mx, new_kernel_values.T])\n\n gradient = y - new_kernel_values.dot(self.alpha)\n self.gradient = np.append(self.gradient, gradient)\n\n a = y * self.c\n a[a > 0] = 0\n self.a = np.append(self.a, a)\n\n b = y * self.c\n b[b < 0] = 0\n self.b = np.append(self.b, b)", "def append(self, vector):\n self._vectors.append(Vec2(*vector))", "def create_feature_vector(self, files=[], name=\"\"):\n\n if( len(files)==0 ):\n return\n\n epsilon = 1e-8\n set = []\n\n #iterating all files obtaining the significant data to compute the feature vectors\n for file in files:\n\n #reading the csv files and keeping the first 3 columns (x,y,time)\n file_data = pd.read_csv(file)\n file_data = file_data.to_numpy()\n data = np.zeros((file_data.shape[0],7))\n data[:,0:3] = file_data[:,0:3]\n\n #computing the other interesting features\n angle = np.arctan(data[:,1]/(data[:,0]+epsilon))\n velocity = np.sqrt( np.square(data[:,1]) + np.square(data[:,0]) )\n log_curvature = np.log10( velocity/(angle+epsilon) )\n acceleration = np.sqrt( np.square(velocity) + np.square(velocity*angle) )\n\n #assigning the new computed features\n data[:,3] = angle\n data[:,4] = velocity\n data[:,5] = log_curvature\n data[:,6] = acceleration\n\n #normalizing the data\n data = self.normalization(data)\n set.append(data)\n\n return set", "def feature_vector1(self, feature_vector1):\n\n self._feature_vector1 = feature_vector1", "def add(self, featVect, label):\n if label in self.labelToNum:\n l = self.labelToNum[label]\n else:\n l = len(self.numToLabel)\n 
self.numToLabel.append(label)\n self.labelToNum[label] = l\n \n self.blocks.append((featVect.reshape((1,featVect.shape[0])).astype(numpy.double),[l]))", "def augment_feature_vector(X):\n column_of_ones = np.zeros([len(X), 1]) + 1\n\n return np.hstack((column_of_ones, X))", "def __add__(self, other):\n return Vector([c1 + c2 for (c1, c2) in zip(self.components, other.components)])", "def apply_new_basis(new_base, vector_array):\n return np.dot(new_base, vector_array).T", "def register_vectors(self, vectors):\n\n self.vectors.extend(vectors)", "def add(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return FreeCAD.Vector(first.x+other.x, first.y+other.y, first.z+other.z)", "def add(self, vector):\n self.x += vector.x\n self.y += vector.y", "def concatenation(self, StdVectorFst other):\n cdef StdVectorFst result = self.copy()\n result.concatenate(other)\n return result", "def __add__(self, vector):\n return self.translated(vector)", "def feature_vector2(self, feature_vector2):\n\n self._feature_vector2 = feature_vector2", "def project_vectors(self, vectors):\n X = vectors - self._mean\n return np.dot(X, self.components.T)", "def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]", "def add_basic_block(self, basic_block):\n self.basic_blocks.append(basic_block)\n basic_block.function = self", "def _update_feature_vec(fvec, word, tag_ngram):", "def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec", "def get_feature_vector(self, feature_idxs):\n feat_vec = np.zeros(len(feature_idxs))\n if hasattr(self, 'generic'):\n generics = self.generic\n else:\n generics = dict()\n for feature in self.features:\n key = generics.get(feature, feature)\n if key in feature_idxs:\n feat_vec[feature_idxs[key]] = self.features[feature]\n return feat_vec", "def to_basic_block(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_to_basic_block(self)", "def get_feature_vector(cc, img, quiet=False):\n savefilename = config.get_classifier_featvect_name(cc.d.images[img]) \n if os.path.isfile(savefilename):\n print 'load feat_vect %s'%(cc.d.images[img].name)\n feat_vect = cPickle.load(open(savefilename,'r'))\n else:\n feat_vect = compute_feature_vector(cc, img, quiet=quiet)\n cPickle.dump(feat_vect, open(savefilename,'w'))\n return feat_vect", "def add_feature(x, x1):\n if x is None:\n x = x1\n else:\n x = np.concatenate((x, x1), axis=1)\n return x", "def __init__(self, feature_vectors):\n # Initialize key variables\n (rows, _) = feature_vectors.shape\n\n # Append a column of ones to array\n ones = np.ones((rows, 1))\n kessler_array = np.hstack((ones, feature_vectors))\n\n # Make array available to rest of class\n self.data = 
kessler_array.tolist()", "def __add__(self, other):\n # other is a scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Vector([i + other for i in self.data], self.column)\n # other is a Vector\n elif isinstance(other, Vector):\n if len(self.data) != len(other):\n raise Exception('Vectors are not of equal length')\n elif self.column != other.column:\n raise Exception('Vectors are not of equal orientation')\n else:\n return Vector([self.data[i] + other.data[i] for i in range(len(self.data))], self.column)\n # other is not a scalar or a Vector\n else:\n raise Exception('Argument is not a number or a Vector') from TypeError", "def __add__(self, other):\n if len( self) != len(other):\n raise ValueError('Dimensions must match.')\n result = Vector(len(self))\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def processFeature(prevWord, word, vector):\n \n # We add feature whether it exists or not\n unigram, exists = vector.getUnigram(prevWord)\n if not exists:\n vector.addUnigram(prevWord)\n \n \n bigram, exists = vector.getBigram(prevWord, word)\n if not exists:\n vector.addBigram(prevWord, word)", "def add_vectors(u, v): #11.22.5\r\n new_vector = []\r\n \"\"\"Because they have same length so we\r\n should take advantage from this one\"\"\"\r\n for i in range(len(u)):\r\n m = u[i] + v[i] # Get their value of i index at the same time!\r\n new_vector.append(m)\r\n return new_vector", "def get_augmented_feature_vectors(feature_vectors):\n augmented = []\n for i, vector in enumerate(feature_vectors): # each vector is a list of tuples\n topics = [tup[0] for tup in vector]\n for t in range(7): # I finally settled on 7 topics\n if t not in topics:\n feature_vectors[i].append((t, 0))\n new_feature_vector = sorted(feature_vectors[i], key=lambda tup: tup[0])\n augmented.append([tup[1] for tup in new_feature_vector])\n return augmented", "def add_feature(X, feature_to_add):\n from scipy.sparse import csr_matrix, hstack\n return hstack([X, csr_matrix(feature_to_add).T], 'csr')", "def add_vector_fields(attributes, data):\n for attrib in attributes:\n if attrib['similarity'] == 'Semantic USE':\n value = data.get(attrib['name'])\n if value is not None:\n newVal = {}\n newVal['name'] = value\n newVal['rep'] = getVector(value)\n data[attrib['name']] = newVal\n elif attrib['similarity'] == 'Semantic SBERT':\n value = data.get(attrib['name'])\n if value is not None:\n newVal = {}\n newVal['name'] = value\n newVal['rep'] = getVectorSemanticSBERT(value)\n data[attrib['name']] = newVal\n elif attrib['similarity'] == 'Array SBERT':\n value = data.get(attrib['name'])\n if value is not None:\n newVal = {}\n newVal['name'] = value\n newVal[\"rep\"] = []\n array = getVectorSemanticSBERTArray(value)\n for element in array:\n temp = {}\n temp['rep'] = element\n newVal[\"rep\"].append(temp)\n\n data[attrib['name']] = newVal\n return data", "def add_vectors(coord, vector):\n return tuple(c1+c2 for c1,c2 in zip(coord, vector))", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]", "def add_mesh_features(self, file_list):\n self.geom.add_features(file_list)", "def f_vector(self):\n try:\n return self._f_vector\n except AttributeError:\n self._f_vector = vector(ZZ,[len(x) for x in self.face_lattice().level_sets()])\n return self._f_vector", "def vector_add(v1, v2):\n return v1[0] + v2[0], v1[1] + v2[1]", "def append(self, featureName, featureData):\n self.data = np.concatenate((self.data, np.array([featureData]).T), axis=1)\n 
self.featureNames = np.append(self.featureNames, featureName)\n return 0", "def sum_vectors(vector_1, vector_2):\n new_coordinates = []\n index = 0\n while index < vector_1.dimension:\n new_value = vector_1.coordinates[index] + vector_2.coordinates[index]\n new_coordinates.append(new_value)\n index += 1\n new_vector = Vector(new_coordinates)\n return new_vector", "def put_vector(self, term, vector):\n self.terms.append(term)\n self.vectors.append(vector.vector)\n self.real_vectors.append(vector)\n return self.dict.update({term: vector})", "def __add__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other)\n if not mv:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj + other\n newValue = self.value + other.value\n\n return self._newMV(newValue)", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]", "def add_vectorlist(vectors):\n x, y, z = zip(*vectors)\n return sum(x), sum(y), sum(z)", "def __add__(self, other):\n\t\tif len(self) != len(other):\n\t\t\traise ValueError('dimensions must agree')\n\t\tresult = Vector(len(self))\n\t\tfor j in range(len(self)):\n\t\t\tresult[j] = self[j] + other[j]\n\t\treturn result", "def __add__(self, other):\n if isinstance(other, (int, type(Zero()))):\n if (other == 0):\n return self\n self._check_vector(other)\n return Vector(self.args + other.args)", "def extend(self, iterable):\n self._vectors.extend(Vec2(*vector) for vector in iterable)", "def update(self, clip_id, feature_vec=None, is_background=False,\n timeout=None):\n with self._rw_lock.write_lock(timeout):\n clip_id = int(clip_id)\n if feature_vec is not None and \\\n not (feature_vec.ndim == 1\n and len(feature_vec) == self._feature_mat.shape[1]):\n raise ValueError(\"Given feature vector not compatible \"\n \"(dimensionality or length does not match)\")\n\n # Update the given feature vector and kernel distances\n # if self._cid2idx_map.get(clip_id, None) is not None:\n if clip_id in self._cid2idx_map:\n # In all cases, update the background status of the clip\n if is_background:\n self._bg_clip_ids.add(clip_id)\n else:\n self._bg_clip_ids.discard(clip_id)\n\n # If we were given a new feature vector, update entries\n if feature_vec is not None:\n idx = self._cid2idx_map[clip_id]\n self._feature_mat[idx] = feature_vec\n new_dist = np.mat(tuple(\n self._histogram_intersection_distance(feature_vec, fv)\n for fv in self._feature_mat\n ))\n self._kernel_mat[idx, :] = new_dist\n self._kernel_mat[:, idx] = new_dist\n\n # Given a new clip id to add, must have feature to add.\n else:\n if feature_vec is None:\n raise ValueError(\"Update given a new clip ID, but no \"\n \"feature vector. 
Feature vectors are \"\n \"required with new IDs.\")\n\n # Update internal feature matrix with added vector\n self._cid2idx_map[clip_id] = self._id_vector.size\n self._id_vector.resize((self._id_vector.size + 1,),\n refcheck=False)\n self._id_vector[-1] = clip_id\n\n if is_background:\n self._bg_clip_ids.add(clip_id)\n\n # noinspection PyUnresolvedReferences\n if self._feature_mat.base is not None:\n raise RuntimeError(\"Feature matrix does not own its data\")\n # Since we're only adding a new row, this resize does not affect\n # the positioning of the existing data.\n self._log.debug(\"Updating feature matrix...\")\n # noinspection PyUnresolvedReferences\n self._feature_mat.resize((self._feature_mat.shape[0] + 1,\n self._feature_mat.shape[1]),\n refcheck=False\n )\n self._feature_mat[-1, :] = feature_vec\n\n # Need to add a new row AND column to the distance kernel.\n self._log.debug(\"Updating kernel matrix...\")\n if self._kernel_mat.base is not None:\n raise RuntimeError(\"kernel matrix does not own its data\")\n assert self._kernel_mat.shape[0] == self._kernel_mat.shape[1], \\\n \"kernel matrix is not symmetric for some reason???\"\n # noinspection PyPep8Naming\n # -> because I like ``N`` better...\n N = self._kernel_mat.shape[0]\n kernel_copy = np.matrix(self._kernel_mat)\n self._kernel_mat.resize((N+1, N+1), refcheck=False)\n self._kernel_mat[:N, :N] = kernel_copy\n del kernel_copy\n\n # Computing new feature distance (histogram intersection). Only\n # need to compute this once because of HI being being\n # commutative and the kernel matrix being symmetric.\n self._log.debug(\"Adding new distance vectors...\")\n dist_vec = np.mat(tuple(\n self._histogram_intersection_distance(feature_vec, fv)\n for fv in self._feature_mat\n ))\n self._kernel_mat[-1, :] = dist_vec\n self._kernel_mat[:, -1] = dist_vec.T", "def __add__(self, other):\n if isinstance(other, Vector):\n a = self._ar + other._ar\n else:\n a = self._ar + numpy.array(other)\n return Vector(a)", "def add_vector_of_ones(X):\n m, n = X.shape\n vector_of_ones = np.ones([m, 1])\n X_with_ones = np.hstack([vector_of_ones, X])\n return X_with_ones", "def vectorAdd(a, b):\n return [a[i] + b[i] for i, j in enumerate(a)]", "def sum(self, vector):\n\n # return (self.from_list([x+vector.vector[self.vector.index(x)]\n # for x in self.vector]))\n return Vector(self.x + vector.x, self.y + vector.y, self.z + vector.z)", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = 
get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def _addVectors(X1,X2):\n _checkSize(X1,X2)\n return [ X1[i] + X2[i] for i in range(len(X1))]", "def rebuild_all(self, vector=None):\n if not hasattr(self, \"positions\"):\n self.load_data()\n\n if vector is not None and not isinstance(vector, np.ndarray):\n raise ValueError(\"`vector` must be a 1-d numpy array\")\n\n if vector is not None:\n assert vector.shape == self.params.shape, \\\n \"Incorrect `vector` shape. Is {}, but should be {}\".format(\n vector.shape, self.params.shape\n )\n # Copy to break references and avoid later manipulation by RNG\n self.sample = (self.rng.next() if vector is None else vector).copy()\n self.rebuild_technosphere_matrix(self.tech_sample)\n self.rebuild_biosphere_matrix(self.bio_sample)\n if self.lcia:\n self.rebuild_characterization_matrix(self.cf_sample)\n if self.weighting:\n self.weighting_value = self.weighting_sample\n\n if self.presamples:\n self.presamples.update_matrices()", "def add_new_features(self):\r\n curr_img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(curr_img)\r\n\r\n # Create a mask to avoid redetecting existing features.\r\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\r\n\r\n for feature in chain.from_iterable(self.curr_features):\r\n x, y = map(int, feature.cam0_point)\r\n mask[y-3:y+4, x-3:x+4] = 0\r\n\r\n # Detect new features.\r\n new_features = self.detector.detect(curr_img, mask=mask)\r\n\r\n # Collect the new detected features based on the grid.\r\n # Select the ones with top response within each grid afterwards.\r\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\r\n for feature in new_features:\r\n row = int(feature.pt[1] / grid_height)\r\n col = int(feature.pt[0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n new_feature_sieve[code].append(feature)\r\n\r\n new_features = []\r\n for features in new_feature_sieve:\r\n if len(features) > self.config.grid_max_feature_num:\r\n features = sorted(features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_max_feature_num]\r\n new_features.append(features)\r\n new_features = list(chain.from_iterable(new_features))\r\n\r\n # Find the stereo matched points for the newly detected features.\r\n cam0_points = [kp.pt for kp in new_features]\r\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\r\n\r\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\r\n for i, inlier in enumerate(inlier_markers):\r\n if not inlier:\r\n continue\r\n cam0_inliers.append(cam0_points[i])\r\n cam1_inliers.append(cam1_points[i])\r\n response_inliers.append(new_features[i].response)\r\n # if len(cam0_inliers) < max(5, len(new_features) * 0.1):\r\n\r\n # Group the features into grids\r\n grid_new_features = [[] for _ in range(self.config.grid_num)]\r\n for i in range(len(cam0_inliers)):\r\n cam0_point = cam0_inliers[i]\r\n cam1_point = cam1_inliers[i]\r\n response = response_inliers[i]\r\n\r\n row = int(cam0_point[1] / grid_height)\r\n col = int(cam0_point[0] / grid_width)\r\n code = 
row*self.config.grid_col + col\r\n\r\n new_feature = FeatureMetaData()\r\n new_feature.response = response\r\n new_feature.cam0_point = cam0_point\r\n new_feature.cam1_point = cam1_point\r\n grid_new_features[code].append(new_feature)\r\n\r\n # Sort the new features in each grid based on its response.\r\n # And collect new features within each grid with high response.\r\n for i, new_features in enumerate(grid_new_features):\r\n for feature in sorted(new_features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_min_feature_num]:\r\n self.curr_features[i].append(feature)\r\n self.curr_features[i][-1].id = self.next_feature_id\r\n self.curr_features[i][-1].lifetime = 1\r\n self.next_feature_id += 1", "def reduce_dimensions(feature_vectors_full, model):\n\n if 'fvectors' in model:\n v = np.array(model['fvectors'])\n else:\n #Principal Components Analysis implemented from lab code\n covx = np.cov(feature_vectors_full, rowvar=0)\n N = covx.shape[0]\n w, v = scipy.linalg.eigh(covx, eigvals=(N - 40, N - 1))\n v = np.fliplr(v)\n model['fvectors'] = v.tolist()\n pca_train = np.dot((feature_vectors_full - np.mean(feature_vectors_full)), v)\n return pca_train[:,0:10]", "def _create_feature_vec():\n\tnum_tags = NGRAM_TUPLE[0]\n\tfvec = []\n\tfor _, size in FEATURE_TUPLE:\n\t\tfvec.append(np.zeros((num_tags, size)))\n\n\t# Append tag ngram weights to end\n\tfvec.append(np.zeros((num_tags, num_tags)))\n\treturn fvec", "def mfcc_features(self, audio, rate, numcep = 20, nfft = 2000, N = 2):\n self.mfcc = python_speech_features.mfcc(audio, rate, numcep = numcep, nfft = nfft)\n #self.mfcc = preprocessing.scale(self.mfcc)\n \n self.delta_mfcc = python_speech_features.delta(self.mfcc, N)\n \n self.mfcc_feature = np.hstack((self.mfcc, self.delta_mfcc))\n \n return self.mfcc_feature", "def assemble_vector(form: _forms, constraint: MultiPointConstraint, b: Optional[_PETSc.Vec] = None) -> _PETSc.Vec:\n\n _log.log(_log.LogLevel.INFO, \"Assemble MPC vector\")\n timer_vector = Timer(\"~MPC: Assemble vector (numba)\")\n\n # Unpack Function space data\n V = form.function_spaces[0]\n x_dofs = V.mesh.geometry.dofmap\n x = V.mesh.geometry.x\n dofs = V.dofmap.map()\n block_size = V.dofmap.index_map_bs\n\n # Data from multipointconstraint\n coefficients = constraint.coefficients()[0]\n masters_adj = constraint.masters\n c_to_s_adj = constraint.cell_to_slaves\n cell_to_slave = c_to_s_adj.array\n c_to_s_off = c_to_s_adj.offsets\n is_slave = constraint.is_slave\n mpc_data = (masters_adj.array, coefficients, masters_adj.offsets, cell_to_slave, c_to_s_off, is_slave)\n slave_cells = extract_slave_cells(c_to_s_off)\n\n # Get index map and ghost info\n if b is None:\n index_map = constraint.function_space.dofmap.index_map\n vector = _la.create_petsc_vector(index_map, block_size)\n else:\n vector = b\n\n # Pack constants and coefficients\n form_coeffs = _cpp.fem.pack_coefficients(form._cpp_object)\n form_consts = _cpp.fem.pack_constants(form._cpp_object)\n\n tdim = V.mesh.topology.dim\n num_dofs_per_element = V.dofmap.dof_layout.num_dofs\n\n # Assemble vector with all entries\n with vector.localForm() as b_local:\n _cpp.fem.assemble_vector(b_local.array_w, form._cpp_object,\n form_consts, form_coeffs)\n\n # Check if we need facet permutations\n # FIXME: access apply_dof_transformations here\n e0 = form.function_spaces[0].element\n needs_transformation_data = e0.needs_dof_transformations or form._cpp_object.needs_facet_permutations\n cell_perms = numpy.array([], dtype=numpy.uint32)\n if needs_transformation_data:\n 
V.mesh.topology.create_entity_permutations()\n cell_perms = V.mesh.topology.get_cell_permutation_info()\n if e0.needs_dof_transformations:\n raise NotImplementedError(\"Dof transformations not implemented\")\n # Assemble over cells\n subdomain_ids = form._cpp_object.integral_ids(_fem.IntegralType.cell)\n num_cell_integrals = len(subdomain_ids)\n\n is_complex = numpy.issubdtype(_PETSc.ScalarType, numpy.complexfloating)\n nptype = \"complex128\" if is_complex else \"float64\"\n ufcx_form = form.ufcx_form\n if num_cell_integrals > 0:\n V.mesh.topology.create_entity_permutations()\n\n # NOTE: This depends on enum ordering in ufcx.h\n cell_form_pos = ufcx_form.form_integral_offsets[0]\n for i, id in enumerate(subdomain_ids):\n cell_kernel = getattr(ufcx_form.form_integrals[cell_form_pos + i], f\"tabulate_tensor_{nptype}\")\n active_cells = form._cpp_object.domains(_fem.IntegralType.cell, id)\n coeffs_i = form_coeffs[(_fem.IntegralType.cell, id)]\n with vector.localForm() as b:\n assemble_cells(numpy.asarray(b), cell_kernel, active_cells[numpy.isin(active_cells, slave_cells)],\n (x_dofs, x), coeffs_i, form_consts,\n cell_perms, dofs, block_size, num_dofs_per_element, mpc_data)\n\n # Assemble exterior facet integrals\n subdomain_ids = form._cpp_object.integral_ids(_fem.IntegralType.exterior_facet)\n num_exterior_integrals = len(subdomain_ids)\n if num_exterior_integrals > 0:\n V.mesh.topology.create_entities(tdim - 1)\n V.mesh.topology.create_connectivity(tdim - 1, tdim)\n # Get facet permutations if required\n facet_perms = numpy.array([], dtype=numpy.uint8)\n if form._cpp_object.needs_facet_permutations:\n facet_perms = V.mesh.topology.get_facet_permutations()\n perm = (cell_perms, form._cpp_object.needs_facet_permutations, facet_perms)\n # NOTE: This depends on enum ordering in ufcx.h\n ext_facet_pos = ufcx_form.form_integral_offsets[1]\n for i, id in enumerate(subdomain_ids):\n facet_kernel = getattr(ufcx_form.form_integrals[ext_facet_pos + i],\n f\"tabulate_tensor_{nptype}\")\n coeffs_i = form_coeffs[(_fem.IntegralType.exterior_facet, id)]\n facets = form._cpp_object.domains(_fem.IntegralType.exterior_facet, id)\n facet_info = pack_slave_facet_info(facets, slave_cells)\n num_facets_per_cell = len(V.mesh.topology.connectivity(tdim, tdim - 1).links(0))\n with vector.localForm() as b:\n assemble_exterior_slave_facets(numpy.asarray(b), facet_kernel, facet_info, (x_dofs, x),\n coeffs_i, form_consts, perm,\n dofs, block_size, num_dofs_per_element, mpc_data, num_facets_per_cell)\n timer_vector.stop()\n return vector", "def create_bitvector(bitvector, *bits):\n if not bitvector in bitvectors:\n bitvectors[bitvector] = []\n \n if bits:\n bitvectors[bitvector].extend(bits)", "def __iadd__(self, other):\n if isinstance(other, Seq2):\n if len(self) == len(other):\n self._vectors = [a + b for a, b in zip(self, other)]\n return self\n else:\n raise ValueError(\"cannot add arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n self._vectors = [a + b for a in self]\n return self", "def create_vectorized_features(data_dir, feature_version=2):\n extractor = PEFeatureExtractor(feature_version)\n\n print(\"Vectorizing training set\")\n X_path = os.path.join(data_dir, \"X_train.dat\")\n y_path = os.path.join(data_dir, \"y_train.dat\")\n raw_feature_paths = [os.path.join(data_dir, \"train_features_{}.jsonl\".format(i)) for i in range(6)]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, 
raw_feature_paths, extractor, nrows)\n\n print(\"Vectorizing test set\")\n X_path = os.path.join(data_dir, \"X_test.dat\")\n y_path = os.path.join(data_dir, \"y_test.dat\")\n raw_feature_paths = [os.path.join(data_dir, \"test_features.jsonl\")]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)", "def __add__(self, other):\n if len(self) != len(other):\n raise ValueError('As dimensões devem ser iguais')\n\n result = Vector(len(self)) # inicia um novo array do tamanho do próprio\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def addAt(self, vector, id1, id2):\n self.matrix.update_add_at(vector,\n numerix.asarray(id1, dtype='int32'),\n numerix.asarray(id2, dtype='int32'))", "def CreateVector(self) -> BaseVector:", "def numpy_vector(self):\n pass", "def add(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item + w_item for v_item, w_item in zip(v, w)]", "def vectorizer_features(self) -> list:\n if self._vectorizer:\n return self._vectorizer.get_feature_names()\n self.logger.warning('Uninitialized vector. Please call count_vectorizer first.')", "def add_vector(self, name, text, tag=None):\n words = self.clean_text_util.clean_text(text)\n \n # max{f(w,d) : w ∈ d)}\n counter = Counter(words)\n _, max_occ = counter.most_common(1)[0] \n\n # remove duplicate word\n words = set(words)\n \n items = []\n for word in words:\n pickle_wordinfo = self.dictionary_db.get(word)\n if not pickle_wordinfo:\n continue\n \n word_info = pickle.loads(pickle_wordinfo)\n\n # tf formula: tf(f,d) = f(f,d)/max{f(w,d) : w ∈ d)} (src Wikipedia)\n tf = counter[word]/float(max_occ)\n\n # create a new vector item entry\n items.append(VectorItem(word, tf))\n\n # sort the vector item by the dictionary index\n items.sort(key=lambda x: x.word_info(self.dictionary_db).index)\n\n # finally, we create a new vector\n vector = Vector(items, tag)\n self.vectors_db.add(name, pickle.dumps(vector))\n\n # add an empty entry to the norm db\n self.vectors_norm_db.add(name, self.vector_tfidf_norm(items))", "def homogeneous_vector(self, base_ring=None):\n v = list(self._vector) + [0]\n return vector(base_ring or self._base_ring, v)", "def homogeneous_vector(self, base_ring=None):\n v = list(self._vector) + [0]\n return vector(base_ring or self._base_ring, v)", "def add_feature(self, feature):\n self.features += [feature]\n for stock in self.stocks:\n feature(self.stock_data[stock])", "def __iadd__( self, vector3 ):\n return self.add( vector3 )", "def create_feature_vector(features, length):\n START_IDX = 0\n END_IDX = 1\n\n output_vector = np.zeros(length)\n\n # negative strand\n for loc in features[-1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 1 \n\n # positive strand\n for loc in features[1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 2\n\n return output_vector", "def apply_optimization_vector(self, vector: np.ndarray) -> None:\n # we are going to iterate through the given vector;\n # an iterator allows us to next()\n # (aka `pop`) the values only when desired;\n # we only update the current vector where the mask is True\n vector_iterator = iter(vector)\n updated_vector = [v if not m else next(vector_iterator)\n for v, m in zip(self.vector,\n self.optimization_mask)]\n updated_links = self.array_2_links(np.array(updated_vector),\n self.convention)\n self.links = updated_links", "def vector_add(a, b):\n assert(len(a) == len(b))\n\n from operator 
import add\n return tuple(map(add, a, b))", "def basic_fields(self, basic_fields):\n\n self._basic_fields = basic_fields", "def homogeneous_vector(self, base_ring=None):\n v = list(self._vector) + [1]\n return vector(base_ring or self._base_ring, v)", "def project_vectors(self, vectors):\n return np.dot(vectors, self.components.T)", "def __add_answered_on_feature(samples: List[TrainSample], all_features: np.array) -> np.array:\n new_features = []\n for sample, features in zip(samples, all_features):\n if isinstance(sample.selected_player, bool):\n answered_on = 1.0 if sample.selected_player else 0.0\n else:\n answered_on = 1.0 if sample.selected_player in sample.answer else 0.0\n features = np.append(features, answered_on * features)\n features = np.append(features, [answered_on])\n new_features.append(features)\n return np.array(new_features)", "def update(self, learning_rate, influence, input_vector, bmu):\n factor = learning_rate * influence\n self.vector = [x + factor * (y - x)\n for x, y in zip(self.vector, input_vector)]", "def __add__(self, v):\n return vector(self.x + v.x, self.y + v.y, self.z + v.z)", "def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])", "def add(self, keys: List[Tuple[int, int]], vectors: np.ndarray, weights: List[float], *args, **kwargs):\n pass", "def updateAttributesAfterAdding(self):\n layer = self.sender()\n while self.addedFeatures:\n featureId = self.addedFeatures.pop()\n #begining the edit command\n # layer.beginEditCommand(self.tr(\"DSG Tools reclassification tool: adjusting feature's attributes\"))\n #accessing added features\n editBuffer = layer.editBuffer()\n features = editBuffer.addedFeatures()\n for key in features.keys():\n #just checking the newly added feature, the other I don't care\n if key == featureId:\n feature = features[key]\n #setting the attributes using the reclassification dictionary\n self.setFeatureAttributes(feature, editBuffer)\n layer.endEditCommand()", "def _vectorize(self, vectorizer = None):\n\n\t\tvectorizer = vectorizer if vectorizer else self.vectorizer;\n\n\t\tself.training_set_vector = vectorizer.fit_transform(self.training_set)\n\n\t\tself.testing_set_vector = vectorizer.transform(self.testing_set)", "def _add_feature(self, feature):\n\n if feature.name in self.feature_name_index:\n logger.info(\"Feature %s already exists at %i, overwriting\" %\n (feature.name, self.feature_name_index[feature.name]))\n self.features[self.feature_name_index[feature.name]] = feature\n else:\n self.features.append(feature)\n self.feature_name_index[feature.name] = len(self.features) - 1\n logger.info(\"Adding %s to model at location %i\" % (\n feature.name, len(self.features)))\n self._add_domain_fault_above(feature)\n self._add_unconformity_above(feature)\n feature.set_model(self)", "def __add__(self, v2):\n\t\treturn Vect2D(self._vec+v2._vec)", "def feed(self, vector):\n return vector", "def _concatenate_features(features):\n pass", "def AsVector(self) -> BaseVector:", "def join_feature_vectors(vecs1, vecs2):\n if len(vecs1) != len(vecs2) or len(vecs1) == 0:\n print('Invalid number of feature vectors: ' + str(len(vecs1)) + ', ' + str(len(vecs2)))\n return\n\n number_of_vecs = len(vecs1)\n vec1_length = len(vecs1[0])\n vec2_length = len(vecs2[0])\n joined_vecs = np.zeros(shape=(number_of_vecs, vec1_length + vec2_length))\n\n for i in range(len(vecs1)):\n joined_vecs[i, 0:vec1_length] = vecs1[i]\n joined_vecs[i, vec1_length:vec1_length+vec2_length] = vecs2[i]\n return joined_vecs", "def normalizeFeatureVector(self):\n # Normalize 
features\n total = 0.0\n for v in self.features.values(): total += abs(v)\n if total == 0.0: \n total = 1.0\n for k,v in self.features.iteritems():\n self.features[k] = float(v) / total", "def calculateFeatures(self, special=None):\n print(\"Computing features.\")\n if not special:\n self.feature_special = dict()\n self.feature_special['fPCA'] = None\n self.feature_special['wPCA'] = None\n else:\n self.feature_special = special\n if not 'wPCA' in special.keys():\n self.feature_special['wPCA'] = None\n if not 'fPCA' in special.keys():\n self.feature_special['fPCA'] = None\n\n self.features = []\n self.features.append(features.Feature_Peak(self))\n self.features.append(features.Feature_Energy(self))\n self.features.append(features.Feature_Time(self))\n self.features.append(features.Feature_Valley(self))\n self.features.append(features.Feature_Trough(self))\n\n if self.use_pca:\n self.features.append(\n features.Feature_PCA(self, self.feature_special['fPCA']))\n self.features.append(\n features.Feature_Waveform_PCA(self,\n self.feature_special['wPCA']))\n self.feature_special['fPCA'] = self.featureByName('fPCA').coeff\n self.feature_special['wPCA'] = self.featureByName('wPCA').coeff", "def insert(self, index, vector):\n self._vectors.insert(index, Vec2(*vector))", "def pack_features_vector(features, labels):\n features = tf.stack(list(features), axis=1)\n return features, labels", "def zStoreForwardPropagate(self, inputVector):\r\n # Preform the forward propagation through the layers\r\n # setting the output of one layer to the input of the next\r\n for layer in self.layers:\r\n inputVector = layer.zStoreForwardPropagate(inputVector)\r\n # The output of the last layer is returned \r\n return inputVector", "def GetVectorArticleInput(dico_vector_input, features):\n features_left = set(features) - set(dico_vector_input.keys())\n if len(features_left) > 0:\n sentence = \"Some features aren't in the dict:\\n\"\n raise MyException(sentence + \"{}\".format(features_left))\n vector_art = []\n other_features = ['abstract', 'syn', 'exergue', 'title', 'secTitle']\n other_features += ['subTitle', 'supTitle']\n for feature in features:\n if feature == 'nbSign':\n if dico_vector_input['nbSign'] == 0:\n print(\"NbSign == 0 l.176 - GetVectorArticleInput\")\n vector_art.append(dico_vector_input[feature])\n else:\n vector_art.append(dico_vector_input[feature])\n # Conversion des variables en indicatrices\n # Normalement plus la peine, comme déjà fait auparavant\n elif feature in other_features:\n if dico_vector_input[feature] > 0:\n vector_art.append(1)\n else:\n vector_art.append(0)\n else:\n vector_art.append(dico_vector_input[feature])\n return (dico_vector_input['melodyId'], np.array([vector_art]))", "def vector_sum(vectors):\n\tresult = vectors[0]\n\tfor vector in vectors:\n\t\tresult = vector_add(result, vector)\n\treturn result" ]
[ "0.5917165", "0.5735873", "0.57002974", "0.5500693", "0.54043525", "0.5337707", "0.5288758", "0.5286068", "0.5255515", "0.5167239", "0.51532346", "0.5146452", "0.5016166", "0.5007243", "0.49999866", "0.4954787", "0.49387354", "0.49348387", "0.4906069", "0.48973984", "0.48947722", "0.48843622", "0.48737946", "0.48659736", "0.48557988", "0.4851156", "0.48325557", "0.48302197", "0.48273098", "0.48233554", "0.48207214", "0.48203552", "0.4814059", "0.48084584", "0.47967294", "0.47724816", "0.47676846", "0.47674248", "0.47627968", "0.47627187", "0.47603443", "0.47562188", "0.47554553", "0.47521526", "0.4751433", "0.47508204", "0.47465366", "0.47431478", "0.47302487", "0.47209936", "0.47202104", "0.47072583", "0.46999484", "0.46943972", "0.4685744", "0.46820685", "0.46757892", "0.46717256", "0.46667406", "0.46651313", "0.46629924", "0.46547842", "0.46376854", "0.46296358", "0.46227074", "0.46171862", "0.461234", "0.45978665", "0.45971566", "0.45897746", "0.45866412", "0.45866412", "0.45842877", "0.45828158", "0.45813686", "0.45690688", "0.4568086", "0.4567427", "0.45620322", "0.4559214", "0.45486456", "0.4541927", "0.45329472", "0.4526106", "0.45153803", "0.45123315", "0.45119604", "0.4501903", "0.4495487", "0.44943866", "0.44895935", "0.44814152", "0.44804716", "0.4478129", "0.4477728", "0.44731006", "0.44698936", "0.44628683", "0.4457999", "0.44571254" ]
0.8391323
0
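For context on the entry above: a minimal, self-contained sketch of the delta-feature idea its query describes, i.e. stacking an MFCC-style frame with its first and second central differences over time. The function name, the dt parameter, and the toy frames are illustrative assumptions and are not part of the dataset.

import numpy as np

def stack_with_central_derivatives(vectors, dt):
    """Concatenate each frame with its first and second central differences.

    `vectors` is a list of 1-D numpy arrays (e.g. MFCC frames) and `dt` is the
    time step between consecutive frames. Central differences need a frame on
    both sides, so the first and last frames produce no output.
    """
    features = []
    for i in range(1, len(vectors) - 1):
        prev_v, v, next_v = vectors[i - 1], vectors[i], vectors[i + 1]
        first = (next_v - prev_v) / (2 * dt)             # central first difference
        second = (next_v - 2 * v + prev_v) / (dt ** 2)   # central second difference
        features.append(np.concatenate((v, first, second)))
    return features

# Example: three 4-dimensional "MFCC" frames give one 12-dimensional feature vector.
frames = [np.arange(4, dtype=float) * k for k in (1.0, 2.0, 4.0)]
feats = stack_with_central_derivatives(frames, dt=0.01)
print(len(feats), feats[0].shape)  # 1 (12,)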
If there is at least one feature vector, returns the last one; else returns None
def get_last_feature_vectors(self):
    if len(self._feature_vectors):
        return self._feature_vectors[-1]
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out", "def get_feature_by_name(self, feature_name):\n feature_index = self.feature_name_index.get(feature_name,-1)\n if feature_index > -1:\n return self.features[feature_index]\n else:\n logger.error(\"{} does not exist!\".format(feature_name))\n return None", "def get_feature_vector(self, board):\n return self.hot_one(board)\n # return self.get_tesauro_feature_vector(self, board)", "def _get_features(task, features, model, similarity_strategy=None):\n X = []\n langs = analysis_utils.get_langs_for_task(task)\n for feature in features:\n if feature != \"size\":\n # this is a nested array\n X_feature = analysis_utils.load_lang2vec_vectors(task=task, features=feature)\n if X_feature is None:\n #continue\n return None\n if similarity_strategy != \"-\":\n # We start with similarities to english\n X_feature = [[sim] for sim in analysis_utils.compute_similarities_of_lang_vecs(X_feature, strategy=similarity_strategy)]\n elif feature == \"size\" and model == \"xlmr\":\n # this is an array, we put it in a list\n X_feature = [[size] for size in analysis_utils.xlmr_input_corpus_sizes(langs)]\n elif feature == \"size\" and model == \"mbert\":\n X_feature = [[size] for size in analysis_utils.mbert_input_corpus_sizes(langs)]\n else:\n raise ValueError()\n # we now have a feature vector for a single feature or feature set\n if len(X) == 0:\n X = np.array(X_feature)\n else:\n X = np.concatenate((X,np.array(X_feature)), axis=1)\n if len(X) == 0:\n return None\n return np.array(X, dtype=float)", "def featureByName(self, name):\n for feature in self.features:\n if feature.name == name:\n return feature\n return None", "def get_features(self):\n if not self.exposes_features:\n return None\n\n return self._last_features", "def __getitem__(self, feat):\n # We perform the test for presence explicitly, to maintain a consistent\n # notion of len(self). If we just returned self.features[k], the\n # defaultdict self.features could self.update(k=float()), thus\n # extending self's length by one.\n return self.features[feat] if feat in self.features else 0.", "def feature_vector(features, vector):\n clean_features = set(features)\n new_features_vector = featurize(vector,clean_features)\n return new_features_vector", "def _get_feature(self, layer_name):\n if (\n layer_name in self.config[\"layers\"]\n and \"feature\" in self.config[\"layers\"][layer_name]\n ):\n return self.config[\"layers\"][layer_name][\"feature\"]\n else:\n return 0", "def getFeature(self, featureName):\n # loop through all the existing features\n for feature in self.features:\n # when we have a match with the name\n if featureName == feature.name:\n # return the value in the solution\n return feature\n # feature doesn't exist\n return None", "def get_vector(self, token):\n try:\n idx = self.token_to_idx[token]\n except KeyError:\n print(\"Input token <{}> is not in the model. 
Will return None type vector\".format(token))\n return None\n return self.embeddings_mat[idx]", "def feature_set(self) -> Optional[pulumi.Input['OrganizationFeatureSet']]:\n return pulumi.get(self, \"feature_set\")", "def feature():\n pass", "def get_vector(self) -> Optional[List[_Score]]:\n\n if len(self._vector) is 0:\n return None\n else:\n return self._vector", "def get_feature(self, feature: FeatureName) -> FeatureInfo:\n if feature not in self.feature_map:\n state = FeatureState.Unsupported\n else:\n state = self.feature_map[feature]\n return FeatureInfo(state=state)", "def get_features(words, vectors):\n result = [vectors.loc[word].values for word in words if word in df_keys.values.reshape(-1)]\n if result:\n return np.stack(result)\n return None", "def filter_feature(feature, typ, value):\n return value is None or feature.__getattribute__(typ) == value", "def parse_optional_vector(x, dtype=None):\n if x == 'none':\n return None\n else:\n return parse_vector(x, dtype)", "def get_feature_vector(cc, img, quiet=False):\n savefilename = config.get_classifier_featvect_name(cc.d.images[img]) \n if os.path.isfile(savefilename):\n print 'load feat_vect %s'%(cc.d.images[img].name)\n feat_vect = cPickle.load(open(savefilename,'r'))\n else:\n feat_vect = compute_feature_vector(cc, img, quiet=quiet)\n cPickle.dump(feat_vect, open(savefilename,'w'))\n return feat_vect", "def _get_relevant_features(self, X):\n if self.only_binary_features:\n feature_mask = which_columns_are_binary(X)\n else:\n feature_mask = np.ones(X.shape[1], dtype=bool)\n return feature_mask", "def has_feature(self, feature):\n features = self.features\n if features is None:\n return False\n \n return feature in features", "def extract_single_feature_vect(gray):\n\n hist, hog_img = skimHOG(gray)\n reduced = reduce_single_vector_dimension(hist)\n\n return reduced, hog_img", "def get(self, name):\n try:\n return(self._d_features[name])\n except:\n log.error(\"Can't get feature '%s'\" % name)\n return", "def select_features(vec):\n return sorted(vec, key=vec.get, reverse=True)[\n : min(len(vec), FEATURE_LENGTH)\n ]", "def features(self) -> List[np.ndarray]:\n return None", "def processFeature(prevWord, word, vector):\n \n # We add feature whether it exists or not\n unigram, exists = vector.getUnigram(prevWord)\n if not exists:\n vector.addUnigram(prevWord)\n \n \n bigram, exists = vector.getBigram(prevWord, word)\n if not exists:\n vector.addBigram(prevWord, word)", "def features_size(self) -> int:\n return len(self.data[0].features) if len(self.data) > 0 and self.data[0].features is not None else None", "def feature(self):\n return self._feature", "def feature(self):\n return self._feature", "def GetVectorArticleInput(dico_vector_input, features):\n features_left = set(features) - set(dico_vector_input.keys())\n if len(features_left) > 0:\n sentence = \"Some features aren't in the dict:\\n\"\n raise MyException(sentence + \"{}\".format(features_left))\n vector_art = []\n other_features = ['abstract', 'syn', 'exergue', 'title', 'secTitle']\n other_features += ['subTitle', 'supTitle']\n for feature in features:\n if feature == 'nbSign':\n if dico_vector_input['nbSign'] == 0:\n print(\"NbSign == 0 l.176 - GetVectorArticleInput\")\n vector_art.append(dico_vector_input[feature])\n else:\n vector_art.append(dico_vector_input[feature])\n # Conversion des variables en indicatrices\n # Normalement plus la peine, comme déjà fait auparavant\n elif feature in other_features:\n if dico_vector_input[feature] > 0:\n vector_art.append(1)\n 
else:\n vector_art.append(0)\n else:\n vector_art.append(dico_vector_input[feature])\n return (dico_vector_input['melodyId'], np.array([vector_art]))", "def _rf_predict(self, feature_vec):\n if feature_vec not in self.rf_cache:\n np_feature = np.array(feature_vec).reshape(1, -1)\n self.rf_cache[feature_vec] = self.predictor.predict(np_feature)[0]\n return self.rf_cache[feature_vec]", "def get_vector(self, word):\n\n if word in self.glove.stoi:\n return self.glove.vectors[self.glove.stoi[word]]\n else:\n return None", "def vectorizer_features(self) -> list:\n if self._vectorizer:\n return self._vectorizer.get_feature_names()\n self.logger.warning('Uninitialized vector. Please call count_vectorizer first.')", "def get_single_output_feature(model: BaseModel) -> BaseFeatureMixin:\n return next(iter(model.output_features.values()))", "def findFeatures(self):\n\t\tpass", "def value_head(features):\n with tf.variable_scope('critic', reuse=tf.AUTO_REUSE):\n features = tf.layers.dense(features, units=1, activation=None, name='output')\n return tf.squeeze(features, axis=-1)", "def take_some_features(data,features,given=None):\n if given is None:\n return data,features\n common,ind1,ind2=take_common_features(features,given)\n data=data[:,ind1]\n features=features[ind1]\n return data,features", "def choose_best_feature(data_set):\n feature_size = len(data_set[0]) - 1\n base_entropy = calc_entropy(data_set)\n best_info_gain = 0.0; best_feature = -1\n for i in xrange(feature_size):\n feat_list = [eg[i] for eg in data_set]\n unique_values = set(feat_list)\n new_entropy = 0.0\n for value in unique_values:\n sub_ds = splite_dataset(data_set, i, value)\n prob = len(sub_ds) / float(len(data_set))\n new_entropy += prob * calc_entropy(sub_ds)\n info_gain = base_entropy - new_entropy\n if info_gain > best_info_gain:\n best_info_gain = info_gain\n best_feature = i\n\n return best_feature", "def get_model_feature(\n model,\n batch_x\n):\n features = model.get_feature(batch_x, training=False)\n return features", "def num_features(self):\n if self.x is None:\n return 0\n return 1 if self.x.dim() == 1 else self.x.size(1)", "def is_feature_layer(layer):\n return getattr(layer, '_is_feature_layer', False)", "def extractFeatures(self, data, tf=False):\n tfidf_training_matrix, tfidf_terms = self.useTfidfVectorizer(data)\n \n if tf:\n tf_vectorizer = CountVectorizer(max_df=0.5, min_df=2, max_features=10000,\n stop_words='english')\n \n tf_training_matrix = tf_vectorizer.fit_transform(data)\n tf_terms = tf_vectorizer.get_feature_names()\n \n return tfidf_training_matrix, tfidf_terms, tf_training_matrix, tf_terms\n \n else:\n return tfidf_training_matrix, tfidf_terms", "def _query(self, feature: str) -> np.ndarray:\n return np.flatnonzero(np.core.defchararray.find(self.internal_types, feature) != -1)", "def _extract_feature(element):\n features = tf.parse_single_example(\n element,\n # Defaults are not specified since both keys are required.\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'label/x': tf.FixedLenFeature([], tf.int64),\n 'label/y': tf.FixedLenFeature([], tf.int64)\n })\n return features", "def is_vector(x):\r\n return len(x.shape) == 1", "def getSentenceFeature(tokens, wordVectors, sentence):\n # Implement computation for the sentence features given a sentence. 
\n \n # Inputs: \n # - tokens: a dictionary that maps words to their indices in \n # the word vector list \n # - wordVectors: word vectors (each row) for all tokens \n # - sentence: a list of words in the sentence of interest \n\n # Output: \n # - sentVector: feature vector for the sentence \n\n sentence_vectors = [wordVectors[tokens[word]] for word in sentence]\n\n return sum(sentence_vectors) * 1.0 / len(sentence_vectors)", "def add_dummy_feature(X, value=...):\n ...", "def feature_set(self) -> pulumi.Output[Optional['OrganizationFeatureSet']]:\n return pulumi.get(self, \"feature_set\")", "def features(self) -> List[np.ndarray]:\n if len(self.data) == 0 or self.data[0].features is None:\n return None\n\n return [d.features for d in self.data]", "def safe_compute_features(*args):\n song_id, entity_type, crop, transform_config, features_config = args\n try:\n tf.logging.info(f\"Compute features for {song_id} on segment {crop}.\")\n features,_ = compute_features(song_id, entity_type, crop, transform_config, features_config)\n return features, False\n\n except Exception as err:\n tf.logging.warn(f\"Error while computing features for {song_id} on segment {crop}: {err}\")\n return np.float32(0.0), True", "def input_fn(params, is_training):\n features = tf.constant(0)\n labels = tf.constant(0)\n\n return features, labels", "def svm_classification(self):\n\n if len(self.saved_gestures.keys()) <= 1:\n print(\"Not enough gestures!\")\n return None\n else:\n x = []\n y = []\n z = []\n for elem in self.current_recording:\n x.append(elem[0][0])\n y.append(elem[1][0])\n z.append(elem[2][0])\n\n gesture_fft = self.get_fft(x, y, z)\n\n if len(gesture_fft) > self.cutoff_length:\n print(\"bigger than cutoff\")\n gesture_fft = gesture_fft[:self.cutoff_length]\n elif len(gesture_fft) < self.cutoff_length:\n\n print(\"smaller than cutoff\")\n temp = np.zeros(self.cutoff_length)\n for x in range(len(gesture_fft)):\n temp[x] = gesture_fft[x]\n gesture_fft = temp\n else:\n pass\n\n return self.classifier.predict(gesture_fft)", "def features(self) -> Optional[pulumi.Input['ProvisionedClustersCommonPropertiesFeaturesArgs']]:\n return pulumi.get(self, \"features\")", "def format_optional_vector(x):\n\n # If vector is None or all elements are NaN, then return none\n # Otherwise format the vector as normal\n if x is None or np.all(np.isnan(x)):\n return 'none'\n else:\n return format_vector(x)", "def _extract_feature(self,f):\n if callable(f): \n return f()\n elif type(f) == tuple:\n return f[0](*list(f[1:]))", "def _choose_best_feature(self, X, y, label, sample_weights=None):\n best_feature_idx = 0\n # YOUR CODE HERE\n # Note that you need to implement the sampling feature part here for random forest!\n # Hint: You may find `np.random.choice` is useful for sampling.\n # begin answer\n n_features = X.shape[1]\n if self.sample_feature:\n max_features=max(1, min(n_features, int(np.round(np.sqrt(n_features)))))\n new_features=np.random.choice(n_features, max_features, replace=False)\n new_X=X[:, new_features]\n else:\n new_X=X\n n_new_features=new_X.shape[1]\n #new_features=np.random.choice(n_features, n_features, replace=False)\n #old_cost=self.entropy(y, sample_weights)\n #use C4.5 algorirhm\n best_impurity=None\n best_feature_idx=0\n best_feature_val=X[0, 0]\n for i in range(n_new_features):\n unique_vals=np.unique(X[:,i])\n for value in unique_vals:\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights=self._split_dataset(X, y, label, i, value, sample_weights)\n if len(sub1_y)>0 
and len(sub2_y)>0:\n new_impurity=self._impurity(y, sub1_y, sub2_y)\n if best_impurity is None or new_impurity > best_impurity:\n best_impurity=new_impurity\n best_feature_idx=i\n best_feature_val=value \n # end answer\n return best_feature_idx, best_feature_val", "def extract_feature(self, atom_fea, nbr_fea, nbr_fea_idx, crystal_atom_idx):\n atom_fea = self.embedding(atom_fea)\n for conv_func in self.convs:\n atom_fea = conv_func(atom_fea, nbr_fea, nbr_fea_idx)\n feature = self.pooling(atom_fea, crystal_atom_idx)\n return feature", "def mostConstrainingFeature(self):\n # keep track of which feature we'll choose next\n nextFeature = None\n # a counter for the minimum number of constraints\n maxCount = -1\n # loop through all the features\n for feature in self.features:\n # if this feature has a value then go back to the top of the loop and get\n # the next feature\n if (feature.value != 'none'):\n continue\n # get a list of all the constraints involving this feature\n constraintList = self.getOpenConstraints(feature.name)\n # compare the number of constraints involving this feature to the current max\n # if this is the first unassigned feature we found or this feature has the most\n # constraints we've found...\n if (len(constraintList) > maxCount):\n # save a pointer to the current feature with most constraints\n nextFeature = feature\n # save the max number of constraints\n maxCount = len(constraintList)\n # return the least constraining feature\n return nextFeature", "def get_feature(model, img_tensor, feature_id, device):\n mean = torch.Tensor([0.485, 0.456, 0.406]).to(device).view(1, config.channels, 1, 1)\n std = torch.Tensor([0.229, 0.224, 0.225]).to(device).view(1, config.channels, 1, 1)\n img_normalized = (img_tensor - mean) / std\n feature = model(img_normalized, feature_id)\n return feature", "def check_nullification(full_gam, feature_combination_full, threshold = 1e-7):\n\n nullified_features = []\n\n for i in range(len(feature_combination_full)):\n XX = full_gam.generate_X_grid(term=i)\n\n term_funct = full_gam.partial_dependence(term=i, X=XX)\n\n std = np.std(term_funct)# Use standard deviation to check for nullification\n\n if std < threshold:\n nullified_features.append(feature_combination_full[i])\n\n return nullified_features", "def testGetFirstFeature(self):\r\n self.prepareTestCanvas()\r\n myLayer = self.bucketFill.getActiveVectorLayer()\r\n myTestBox = QgsRectangle(TEST_BOX[0], TEST_BOX[1],\r\n TEST_BOX[2], TEST_BOX[3])\r\n\r\n myFeatureCount = myLayer.featureCount()\r\n if myFeatureCount > 0:\r\n myFeature = self.bucketFill.getFirstFeature(myLayer, myTestBox)\r\n print myFeature\r\n myMessage = ('Returned object was not a feature.')\r\n assert myFeature.type() == QgsFeature, myMessage\r\n else:\r\n myMessage = ('No features found in layer.')\r\n assert 1 == 0, myMessage", "def check_valid(feature: th.Tensor,\n num_frames: Optional[th.Tensor]) -> Tuple[th.Tensor]:\n num_nans = th.sum(th.isnan(feature))\n shape = feature.shape\n if num_nans:\n raise ValueError(f\"Detect {num_nans} NANs in feature matrices, \" +\n f\"shape = {shape}...\")\n if num_frames is not None:\n max_frames = num_frames.max().item()\n if feature.shape[-2] < max_frames:\n raise RuntimeError(f\"feats shape: {shape[-2]} x {shape[-1]}, \" +\n f\"num_frames = {num_frames.tolist()}\")\n if feature.shape[-2] > max_frames:\n feature = feature[..., :max_frames, :]\n return feature, num_frames", "def get_img_feature(self, image_id):\n self.check_img_feature_file()\n self.check_img_feature_offset_map()\n\n if 
image_id in self.img_feat_offset_map:\n img_offset = self.img_feat_offset_map[image_id]\n self.img_feature_file.seek(img_offset, 0)\n arr = [s.strip() for s in self.img_feature_file.readline().split('\\t')]\n num_boxes = int(arr[1])\n feat = np.frombuffer(base64.b64decode(arr[2]), dtype=np.float32).reshape((-1, self.args.img_feature_dim))\n return feat\n\n return None", "def get_other_features(self):\n return self.other_features", "def _float_feature(value):\n\treturn tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def irrelevant_features(features):\n irrelevant = []\n for vec in set(features):\n if (features[vec].count(0)/len(indtf_features[vec])) < 0.1:\n irrelevant.append(vec)\n return irrelevant", "def features(self) -> Optional[pulumi.Input['DevToolPortalFeatureSettingsArgs']]:\n return pulumi.get(self, \"features\")", "def _isFIdx(self, featureName):\n return 1 if (featureName in self.featureNames) else 0", "def select_features(data: AnnData, features: str = None) -> str:\n keyword = \"fmat_\" + str(features) # fmat: feature matrix\n\n if keyword not in data.uns:\n if features is not None:\n assert features in data.var\n fmat = data.X[:, data.var[features].values]\n else:\n fmat = data.X\n\n if issparse(fmat):\n data.uns[keyword] = fmat.toarray()\n else:\n data.uns[keyword] = fmat.copy()\n\n return keyword", "def getFeatures(self,layer): \n numFeatures = layer.GetFeatureCount()\n features = []\n for i in range(numFeatures):\n feature = layer.GetNextFeature()\n if feature is not None:\n geomRef = feature.GetGeometryRef()\n if((geomRef is not None and geomRef.GetPointCount() != 0)):\n features.append(self.getFeatureInfo(feature))\n return features", "def feat():\n pass", "def feature_selection(feature_matrix, missing_threshold=90, correlation_threshold=0.95):\n \n feature_matrix = pd.get_dummies(feature_matrix)\n n_features_start = feature_matrix.shape[1]\n print('Original shape: ', feature_matrix.shape)\n\n # Find missing and percentage\n missing = pd.DataFrame(feature_matrix.isnull().sum())\n missing['percent'] = 100 * (missing[0] / feature_matrix.shape[0])\n missing.sort_values('percent', ascending = False, inplace = True)\n\n # Missing above threshold\n missing_cols = list(missing[missing['percent'] > missing_threshold].index)\n n_missing_cols = len(missing_cols)\n\n # Remove missing columns\n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in missing_cols]]\n print('{} missing columns with threshold: {}.'.format(n_missing_cols,\n missing_threshold))\n \n # Zero variance\n unique_counts = pd.DataFrame(feature_matrix.nunique()).sort_values(0, ascending = True)\n zero_variance_cols = list(unique_counts[unique_counts[0] == 1].index)\n n_zero_variance_cols = len(zero_variance_cols)\n\n # Remove zero variance columns\n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in zero_variance_cols]]\n print('{} zero variance columns.'.format(n_zero_variance_cols))\n \n # Correlations\n corr_matrix = feature_matrix.corr()\n\n # Extract the upper triangle of the correlation matrix\n upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k = 1).astype(np.bool))\n\n # Select the features with correlations above the threshold\n # Need to use the absolute value\n to_drop = [column for column in upper.columns if any(upper[column].abs() > correlation_threshold)]\n\n n_collinear = len(to_drop)\n \n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in to_drop]]\n print('{} collinear columns removed with threshold: 
{}.'.format(n_collinear,\n correlation_threshold))\n \n total_removed = n_missing_cols + n_zero_variance_cols + n_collinear\n \n print('Total columns removed: ', total_removed)\n print('Shape after feature selection: {}.'.format(feature_matrix.shape))\n return feature_matrix", "def get_feature_vector(self, mode=\"binary\"):\n voxel_n = np.ravel_multi_index([self.voxel_x, self.voxel_y, self.voxel_z], self.x_y_z)\n if mode == \"binary\":\n vector = np.zeros(self.n_voxels)\n vector[np.unique(voxel_n)] = 1\n vector = vector.reshape(self.x_y_z)\n return vector\n\n elif mode == \"binary_with_nopoints\":\n vector = np.zeros(self.n_voxels)\n vector[np.unique(voxel_n)] = 1\n vector = vector.reshape(self.x_y_z)\n tot_bounds = abs(self.bounds[0]) + abs(self.bounds[1])\n # TODO can be parallelised\n non_points = []\n for point in self.points_inside_bounds:\n start, end = get_points_from_bounds(self.bounds[0], self.bounds[1], self.origin, point)\n start_projected_voxelgrid = (start - self.bounds[0])\n end_projected_voxelgrid = (end - self.bounds[0])\n\n assert np.all(start_projected_voxelgrid + PRECISION >= 0), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(start_projected_voxelgrid + PRECISION, tot_bounds)\n assert np.all(end_projected_voxelgrid + PRECISION >= 0), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(end_projected_voxelgrid + PRECISION, tot_bounds)\n assert np.all(start_projected_voxelgrid - PRECISION <= tot_bounds), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(start_projected_voxelgrid, tot_bounds)\n assert np.all(end_projected_voxelgrid - PRECISION <= tot_bounds), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(end_projected_voxelgrid, tot_bounds)\n\n start_projected_voxelgrid = np.clip(start_projected_voxelgrid, 0, tot_bounds - PRECISION)\n end_projected_voxelgrid = np.clip(end_projected_voxelgrid, 0, tot_bounds - PRECISION)\n\n new_non_points = list(supercover_line(start_projected_voxelgrid, end_projected_voxelgrid, self.sizes))\n non_points.extend(new_non_points)\n # if not np.all(np.array(new_non_points) >= 0) or not np.all(np.array(new_non_points).max(axis=0) < vector.shape):\n # print('Non-point detected with indices under 0 or over size')\n # print('start = {}'.format(start_projected_voxelgrid))\n # print('end = {}'.format(end_projected_voxelgrid))\n # print('Max Size: {}'.format(vector.shape))\n # print('Wrong points:')\n # print(np.array(new_non_points))\n # raise Exception()\n\n # convert only cells that are 0 to -1, NOT 1 to -1\n non_points = np.unique(np.array(non_points), axis=0).astype(int)\n\n temp = vector[non_points[:, 0], non_points[:, 1], non_points[:, 2]]\n temp[temp == 0] = -1\n vector[non_points[:, 0], non_points[:, 1], non_points[:, 2]] = temp\n return vector\n elif mode == \"density\":\n vector = np.zeros(self.n_voxels)\n count = np.bincount(voxel_n)\n vector[:len(count)] = count\n vector /= len(voxel_n)\n vector = vector.reshape(self.x_y_z)\n return vector\n # elif mode == \"TDF\":\n # vector = np.zeros(self.n_voxels)\n # # truncation = np.linalg.norm(self.shape)\n # kdt = cKDTree(self.points_inside_bounds)\n # vector, i = kdt.query(self.voxel_centers, n_jobs=-1)\n # vector = vector.reshape(self.x_y_z)\n # return vector\n elif mode.endswith(\"_max\"):\n vector = np.zeros(self.n_voxels)\n if not is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_max\": 0, \"y_max\": 1, \"z_max\": 2}\n vector = 
groupby_max(self.points_inside_bounds, voxel_n, axis[mode], vector)\n vector = vector.reshape(self.x_y_z)\n return vector\n elif mode.endswith(\"_mean\"):\n vector = np.zeros(self.n_voxels)\n if not is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_mean\": 0, \"y_mean\": 1, \"z_mean\": 2}\n voxel_sum = groupby_sum(self.points_inside_bounds, voxel_n, axis[mode], np.zeros(self.n_voxels))\n voxel_count = groupby_count(self.points_inside_bounds, voxel_n, np.zeros(self.n_voxels))\n vector = np.nan_to_num(voxel_sum / voxel_count)\n vector = vector.reshape(self.x_y_z)\n return vector\n\n else:\n raise NotImplementedError(\"{} is not a supported feature vector mode\".format(mode))", "def feature_one(ds, tup):\n # try:\n # if (nx.shortest_path_length(G, frm, to) == 1):\n # o1.write(\"trusted\\n\")\n # else:\n # o1.write(\"unverified\\n\")\n # except:\n # o1.write(\"unverified\\n\")\n\n return tup[0] in ds[tup[1]]", "def get_vect(word, model, method):\n if method == \"model\":\n try:\n return model.wv[word]\n except KeyError:\n return None\n else:\n try:\n return model[word]\n except KeyError:\n return None", "def _handle_feature(fea):\n if len(fea.shape) == 1:\n fea = np.array([fea]).T\n\n return fea", "def features_dim(self):\n if not self.exposes_features:\n return None\n\n dim = self._features_op.outputs[0].get_shape().as_list()[-1]\n if dim is None:\n logger.warning(\n \"Unable to statically get feature dimension; returning None\"\n )\n\n return dim", "def _feature_selection(self , x ,y):\n # initialize good features list\n # and best scores to keep track of both\n good_features = []\n best_scores = []\n\n # calculating the number of features\n num_features = x.shape[1]\n\n # infinite loop\n while True:\n # intialize best feature and score of this loop\n this_feature = None\n best_score = 0\n\n # loop over all features\n for feature in range(num_features):\n # if feature is already in good features,\n # skip this for loop\n if feature in good_features:\n\n continue\n # selected features are all good till now\n # and current feature\n selected_features = good_features + [feature]\n # remove all other feature from the data\n xtrain = x[: , selected_features]\n # calculate the score , in our case AUC\n score = self.evaluate_score(xtrain , y)\n # if score is greater then the best score\n # of this loop, change best score and best feature\n if score > best_score:\n this_feature = feature\n best_score = score\n\n # if we have selected a feature , add it to\n # the good feature list and update best score list\n if this_feature != None:\n good_features.append(this_feature)\n best_scores.append(best_score)\n\n # if we did not improve during the last two rounds,\n # exit the while loop\n if len(best_score) > 2:\n if best_scores[-1] < best_scores[-2]:\n break\n\n # return the best score and good features\n # why do we remove the last data point?\n return best_scores[:-1] , good_features[:-1]", "def get_selected_features(pipeline, verbose = False):\n\n \n assert isinstance(pipeline, Pipeline), \"Input isn't a Pipeline\"\n assert isinstance(pipeline[0], ColumnTransformer), \"First step isn't a ColumnTransformer\"\n\n features = get_feature_names(pipeline[0], verbose=verbose)\n\n for i, step in enumerate(pipeline.steps[1:]):\n if verbose: print(i, \": \", step[0])\n \n if hasattr(step[1], 'get_support'):\n \n check_is_fitted(step[1])\n\n retained_cols = step[1].get_support()\n if verbose: print(sum(retained_cols), \"of\", len(retained_cols), \"retained, 
\",\\\n round(sum(retained_cols) / len(retained_cols) * 100, 1), \"%\")\n\n features = [feature for is_retained, feature in zip(retained_cols, features) if is_retained] \n\n return features", "def _parseFeature(self, name, value=None):\n supported = self._parse([(name, value)])\n return supported.getFeature(name)", "def get_feature_vector(self, mode=\"binary\"):\n vector = np.zeros(self.n_voxels)\n\n if mode == \"binary\":\n vector[np.unique(self.voxel_n)] = 1\n\n elif mode == \"density\":\n count = np.bincount(self.voxel_n)\n vector[:len(count)] = count\n vector /= len(self.voxel_n)\n\n elif mode == \"TDF\":\n # truncation = np.linalg.norm(self.shape)\n kdt = cKDTree(self._points)\n vector, i = kdt.query(self.voxel_centers, n_jobs=-1)\n\n elif mode.endswith(\"_max\"):\n if not is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_max\": 0, \"y_max\": 1, \"z_max\": 2}\n vector = groupby_max(self._points, self.voxel_n, axis[mode], vector)\n\n elif mode.endswith(\"_mean\"):\n if not is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_mean\": 0, \"y_mean\": 1, \"z_mean\": 2}\n voxel_sum = groupby_sum(self._points, self.voxel_n, axis[mode], np.zeros(self.n_voxels))\n voxel_count = groupby_count(self._points, self.voxel_n, np.zeros(self.n_voxels))\n vector = np.nan_to_num(voxel_sum / voxel_count)\n\n else:\n raise NotImplementedError(\"{} is not a supported feature vector mode\".format(mode))\n\n return vector.reshape(self.x_y_z)", "def get_features(data, col_list, y_name):\n \n # keep track of numpy values\n feature_matrix = data[col_list + [y_name]].dropna().values\n return feature_matrix[:, :-1], feature_matrix[:, -1]", "def my_featurize(apartment):\n return x, y", "def extractFeatures(image, feature_list):\n # for multiple features or color features\n #feat_vec = np.array([])\n \n # sift has 128D\n feat_vec = np.empty((0,128))\n n_channels = (image.shape[2] if len(image.shape)==3 else 1)\n \n #img_f32 = image.astype(np.float32)\n\n for feature in feature_list:\n if (feature.strip().lower() == 'dsift'):\n print \"computing dsift (dense rootSift) features\"\n dense = cv2.FeatureDetector_create(\"Dense\")\n sift = cv2.SIFT()\n if n_channels == 1:\n kp = dense.detect(image[:,:])\n # compute kp descriptors\n _,des = sift.compute(image[:,:],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n \n feat_vec = np.vstack((feat_vec, des))\n else:\n for channel in xrange(n_channels):\n kp = dense.detect(image[:,:,channel])\n _,des = sift.compute(image[:,:,channel],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n\n feat_vec = np.vstack((feat_vec, des))\n \n# if (feature.strip().lower() == 'color'):\n# print \"computing color features\"\n# # scale from 0-255 between 0 and 1\n# if args.scale == 1:\n# img_f32 /= 255.\n# \n# f_tmp = img_f32.flatten()\n# feat_vec = np.append(feat_vec, f_tmp)\n else:\n raise Exception(\"Method '%s' is not implemented!\"%(feature)) \n \n return feat_vec", "def featurize(vector,features):\n dictionary = collections.defaultdict(lambda:0)\n for feature in iter(set(features)):\n dictionary[feature] = [vector[key][feature] if feature in vector[key] else 0 for key in vector] #populates vectors with zeroes where there's no value in an industry for an n-gram.\n return dictionary", "def precompute(self, features, mode, params):\n return None", "def 
feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected", "def fvector(data, method ):\n\n fv = 0\n if method['type'] == 'lbp':\n \n\n lbpkern = lbpsimple.generateKernel2()\n \n imlbp = lbpsimple.lbp2oneslice(data, lbpkern)\n\n fv,bins = lbpsimple.features(imlbp)\n\n #pdb.set_trace();\n elif method['type'] == 'hist':\n \n fv, bins = numpy.histogram( data,range(-200,2000,20))\n fv = fv[10:15]\n #fv, bins = numpy.histogram( data)\n pass\n\n else:\n raise Exception('Unknow method for feature vector: %s' %(method))\n\n return fv", "def __getitem__(self, feature_name):\n return self.get_feature_by_name(feature_name)", "def 
get_feature_vector_array(inst: Instance):\n\n fv = inst.get_feature_vector()\n tmp = []\n for j in range(inst.get_feature_count()):\n if fv.get_feature(j) == 1:\n tmp.append(1)\n else:\n tmp.append(0)\n return np.array(tmp)", "def _get_features_selection(self):\n self._validate_features_selection()\n if self.features_selection == \"auto\":\n if self._get_mode() == \"Explain\":\n return False\n if self._get_mode() == \"Perform\":\n return True\n if self._get_mode() == \"Compete\":\n return True\n if self._get_mode() == \"Optuna\":\n return False\n else:\n return deepcopy(self.features_selection)", "def _float_feature(value):\n if isinstance(value, list):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n else:\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n if isinstance(value, list):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _len_feature(tf_feature):\n assert(tf_feature)\n attrs = ['bytes_list', 'float_list', 'int64_list']\n for attr in attrs:\n if hasattr(tf_feature, attr):\n feature_vec = getattr(tf_feature, attr).value\n res = len(feature_vec)\n if res > 0:\n return res\n return 0", "def _validate_features_in_predict_input(self, result):\n pass", "def feature_spec(self):\n if not self.infer_without_label:\n feature_shapes = [(tf.TensorShape([tf.Dimension(None)]), tf.TensorShape([]),\n tf.TensorShape([tf.Dimension(None)]), tf.TensorShape([]))]\n feature_shapes.append(tf.TensorShape([tf.Dimension(None)]))\n else:\n feature_shapes = [(tf.TensorShape([tf.Dimension(None)]), tf.TensorShape([]))\n ]\n if len(feature_shapes) == 1:\n return feature_shapes[0]\n return tuple(feature_shapes)", "def get_feature_only(self, verbose = False):\n # captures the last self.num_steps + 1 frames\n # last_num_steps_frames is a SESSION\n last_num_steps_frames = self.capture_last(self.num_steps + 1)\n\n # Extract features from the last frames\n # inputs: np.array (self.num_steps, n_input)\n # because we can make last_num_steps_frames a little bit longer\n # so we just clip down a little bit\n inputs = self._get_features(last_num_steps_frames)[-self.num_steps:]\n\n return inputs", "def __tf_idf_feature_extraction(self):\n print('=' * 80)\n print(\"TF-IDF Feature Extraction\")\n t0 = time()\n vectorizer = TfidfVectorizer()\n vec_train = vectorizer.fit_transform(self.train.text)\n vec_test = vectorizer.transform(self.test.text)\n duration = time() - t0\n print(\"DONE!!!!! total time: %fs\" % duration)\n print('=' * 80)\n return vec_train, vec_test", "def __contains__(self, feature):\n return feature in self.features", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))" ]
[ "0.6076066", "0.6039801", "0.60045683", "0.5997051", "0.59677297", "0.5927279", "0.5924938", "0.592038", "0.58969414", "0.58928514", "0.5883369", "0.5868851", "0.5810611", "0.57908976", "0.57573485", "0.5748689", "0.57457215", "0.5690704", "0.5680142", "0.5675147", "0.5661629", "0.5642336", "0.5623844", "0.5604885", "0.5604279", "0.56018674", "0.5579186", "0.5577575", "0.5577575", "0.55653566", "0.55306137", "0.55241036", "0.5521289", "0.5516221", "0.5512807", "0.5512517", "0.5504548", "0.54984176", "0.547595", "0.5454508", "0.54489577", "0.54483", "0.5446874", "0.54350245", "0.53953946", "0.53846943", "0.53825283", "0.5382084", "0.53798306", "0.5360352", "0.5347559", "0.53443104", "0.53354853", "0.53227854", "0.5307955", "0.52868974", "0.5284724", "0.52706176", "0.5270413", "0.5263167", "0.5260489", "0.52483255", "0.52451235", "0.5222144", "0.521763", "0.5216146", "0.5207994", "0.52037156", "0.5203006", "0.519222", "0.51901084", "0.51895493", "0.5185788", "0.5184258", "0.5175466", "0.5171491", "0.5162965", "0.51582223", "0.51530296", "0.5146966", "0.5144051", "0.5141394", "0.5137949", "0.51241225", "0.51218253", "0.5119906", "0.5115301", "0.5114925", "0.51121116", "0.5111929", "0.5100198", "0.5094482", "0.50924677", "0.5089625", "0.5088957", "0.50882506", "0.50868905", "0.50848496", "0.5079067", "0.50790644" ]
0.6274125
0